diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/002_utils_files/figure-html/cell-103-output-1.png b/002_utils_files/figure-html/cell-103-output-1.png new file mode 100644 index 000000000..268fdb04f Binary files /dev/null and b/002_utils_files/figure-html/cell-103-output-1.png differ diff --git a/002_utils_files/figure-html/cell-124-output-1.png b/002_utils_files/figure-html/cell-124-output-1.png new file mode 100644 index 000000000..85ef0c875 Binary files /dev/null and b/002_utils_files/figure-html/cell-124-output-1.png differ diff --git a/002_utils_files/figure-html/cell-189-output-1.png b/002_utils_files/figure-html/cell-189-output-1.png new file mode 100644 index 000000000..d2214c908 Binary files /dev/null and b/002_utils_files/figure-html/cell-189-output-1.png differ diff --git a/002_utils_files/figure-html/cell-194-output-1.png b/002_utils_files/figure-html/cell-194-output-1.png new file mode 100644 index 000000000..1d0d43a6e Binary files /dev/null and b/002_utils_files/figure-html/cell-194-output-1.png differ diff --git a/002_utils_files/figure-html/cell-196-output-1.png b/002_utils_files/figure-html/cell-196-output-1.png new file mode 100644 index 000000000..ceeb0f65a Binary files /dev/null and b/002_utils_files/figure-html/cell-196-output-1.png differ diff --git a/002_utils_files/figure-html/cell-198-output-1.png b/002_utils_files/figure-html/cell-198-output-1.png new file mode 100644 index 000000000..d7c104710 Binary files /dev/null and b/002_utils_files/figure-html/cell-198-output-1.png differ diff --git a/002_utils_files/figure-html/cell-199-output-1.png b/002_utils_files/figure-html/cell-199-output-1.png new file mode 100644 index 000000000..7ea462d76 Binary files /dev/null and b/002_utils_files/figure-html/cell-199-output-1.png differ diff --git a/002_utils_files/figure-html/cell-229-output-1.png b/002_utils_files/figure-html/cell-229-output-1.png new file mode 100644 index 000000000..d6eb41725 Binary files /dev/null and b/002_utils_files/figure-html/cell-229-output-1.png differ diff --git a/002_utils_files/figure-html/cell-230-output-1.png b/002_utils_files/figure-html/cell-230-output-1.png new file mode 100644 index 000000000..d6eb41725 Binary files /dev/null and b/002_utils_files/figure-html/cell-230-output-1.png differ diff --git a/002_utils_files/figure-html/cell-232-output-1.png b/002_utils_files/figure-html/cell-232-output-1.png new file mode 100644 index 000000000..9c0f43701 Binary files /dev/null and b/002_utils_files/figure-html/cell-232-output-1.png differ diff --git a/002_utils_files/figure-html/cell-234-output-1.png b/002_utils_files/figure-html/cell-234-output-1.png new file mode 100644 index 000000000..91c064925 Binary files /dev/null and b/002_utils_files/figure-html/cell-234-output-1.png differ diff --git a/002_utils_files/figure-html/cell-234-output-2.png b/002_utils_files/figure-html/cell-234-output-2.png new file mode 100644 index 000000000..b1abb8989 Binary files /dev/null and b/002_utils_files/figure-html/cell-234-output-2.png differ diff --git a/002_utils_files/figure-html/cell-234-output-3.png b/002_utils_files/figure-html/cell-234-output-3.png new file mode 100644 index 000000000..1d43b0ca5 Binary files /dev/null and b/002_utils_files/figure-html/cell-234-output-3.png differ diff --git a/002_utils_files/figure-html/cell-251-output-2.png b/002_utils_files/figure-html/cell-251-output-2.png new file mode 100644 index 000000000..23159afb1 Binary files /dev/null and 
b/002_utils_files/figure-html/cell-251-output-2.png differ diff --git a/002_utils_files/figure-html/cell-252-output-2.png b/002_utils_files/figure-html/cell-252-output-2.png new file mode 100644 index 000000000..44c2f35c5 Binary files /dev/null and b/002_utils_files/figure-html/cell-252-output-2.png differ diff --git a/002_utils_files/figure-html/cell-252-output-3.png b/002_utils_files/figure-html/cell-252-output-3.png new file mode 100644 index 000000000..d86a07807 Binary files /dev/null and b/002_utils_files/figure-html/cell-252-output-3.png differ diff --git a/003_data.validation_files/figure-html/cell-16-output-1.png b/003_data.validation_files/figure-html/cell-16-output-1.png new file mode 100644 index 000000000..c2e05447b Binary files /dev/null and b/003_data.validation_files/figure-html/cell-16-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-17-output-2.png b/003_data.validation_files/figure-html/cell-17-output-2.png new file mode 100644 index 000000000..44083d8df Binary files /dev/null and b/003_data.validation_files/figure-html/cell-17-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-19-output-1.png b/003_data.validation_files/figure-html/cell-19-output-1.png new file mode 100644 index 000000000..3bcb641f3 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-19-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-19-output-2.png b/003_data.validation_files/figure-html/cell-19-output-2.png new file mode 100644 index 000000000..508866ccf Binary files /dev/null and b/003_data.validation_files/figure-html/cell-19-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-21-output-1.png b/003_data.validation_files/figure-html/cell-21-output-1.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/003_data.validation_files/figure-html/cell-21-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-21-output-2.png b/003_data.validation_files/figure-html/cell-21-output-2.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/003_data.validation_files/figure-html/cell-21-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-21-output-3.png b/003_data.validation_files/figure-html/cell-21-output-3.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/003_data.validation_files/figure-html/cell-21-output-3.png differ diff --git a/003_data.validation_files/figure-html/cell-21-output-4.png b/003_data.validation_files/figure-html/cell-21-output-4.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/003_data.validation_files/figure-html/cell-21-output-4.png differ diff --git a/003_data.validation_files/figure-html/cell-22-output-2.png b/003_data.validation_files/figure-html/cell-22-output-2.png new file mode 100644 index 000000000..a7f185819 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-22-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-24-output-2.png b/003_data.validation_files/figure-html/cell-24-output-2.png new file mode 100644 index 000000000..076bf7f0a Binary files /dev/null and b/003_data.validation_files/figure-html/cell-24-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-25-output-1.png b/003_data.validation_files/figure-html/cell-25-output-1.png new file mode 100644 index 000000000..3fd73c9c1 Binary files /dev/null and 
b/003_data.validation_files/figure-html/cell-25-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-25-output-2.png b/003_data.validation_files/figure-html/cell-25-output-2.png new file mode 100644 index 000000000..e435f58a3 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-25-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-26-output-1.png b/003_data.validation_files/figure-html/cell-26-output-1.png new file mode 100644 index 000000000..7dbb3d8d1 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-26-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-27-output-1.png b/003_data.validation_files/figure-html/cell-27-output-1.png new file mode 100644 index 000000000..59b1bc95e Binary files /dev/null and b/003_data.validation_files/figure-html/cell-27-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-27-output-2.png b/003_data.validation_files/figure-html/cell-27-output-2.png new file mode 100644 index 000000000..59b1bc95e Binary files /dev/null and b/003_data.validation_files/figure-html/cell-27-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-27-output-3.png b/003_data.validation_files/figure-html/cell-27-output-3.png new file mode 100644 index 000000000..2fd2c9a85 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-27-output-3.png differ diff --git a/003_data.validation_files/figure-html/cell-27-output-4.png b/003_data.validation_files/figure-html/cell-27-output-4.png new file mode 100644 index 000000000..3e611b3b0 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-27-output-4.png differ diff --git a/003_data.validation_files/figure-html/cell-28-output-1.png b/003_data.validation_files/figure-html/cell-28-output-1.png new file mode 100644 index 000000000..e6080d6d3 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-28-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-29-output-1.png b/003_data.validation_files/figure-html/cell-29-output-1.png new file mode 100644 index 000000000..3bc7f0276 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-29-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-30-output-1.png b/003_data.validation_files/figure-html/cell-30-output-1.png new file mode 100644 index 000000000..f116b1fa1 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-30-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-32-output-1.png b/003_data.validation_files/figure-html/cell-32-output-1.png new file mode 100644 index 000000000..c60ba42ac Binary files /dev/null and b/003_data.validation_files/figure-html/cell-32-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-32-output-2.png b/003_data.validation_files/figure-html/cell-32-output-2.png new file mode 100644 index 000000000..8a21c5e79 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-32-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-32-output-3.png b/003_data.validation_files/figure-html/cell-32-output-3.png new file mode 100644 index 000000000..9a06c6f25 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-32-output-3.png differ diff --git a/003_data.validation_files/figure-html/cell-32-output-4.png b/003_data.validation_files/figure-html/cell-32-output-4.png new file mode 100644 index 
000000000..35795a3ee Binary files /dev/null and b/003_data.validation_files/figure-html/cell-32-output-4.png differ diff --git a/003_data.validation_files/figure-html/cell-32-output-5.png b/003_data.validation_files/figure-html/cell-32-output-5.png new file mode 100644 index 000000000..dd48338bf Binary files /dev/null and b/003_data.validation_files/figure-html/cell-32-output-5.png differ diff --git a/003_data.validation_files/figure-html/cell-33-output-1.png b/003_data.validation_files/figure-html/cell-33-output-1.png new file mode 100644 index 000000000..1d8b32876 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-33-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-34-output-1.png b/003_data.validation_files/figure-html/cell-34-output-1.png new file mode 100644 index 000000000..359b7e8c4 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-34-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-35-output-1.png b/003_data.validation_files/figure-html/cell-35-output-1.png new file mode 100644 index 000000000..d73b7987e Binary files /dev/null and b/003_data.validation_files/figure-html/cell-35-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-36-output-1.png b/003_data.validation_files/figure-html/cell-36-output-1.png new file mode 100644 index 000000000..368d50ff3 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-36-output-1.png differ diff --git a/003_data.validation_files/figure-html/cell-46-output-2.png b/003_data.validation_files/figure-html/cell-46-output-2.png new file mode 100644 index 000000000..486fdcac3 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-46-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-46-output-4.png b/003_data.validation_files/figure-html/cell-46-output-4.png new file mode 100644 index 000000000..1fa85baab Binary files /dev/null and b/003_data.validation_files/figure-html/cell-46-output-4.png differ diff --git a/003_data.validation_files/figure-html/cell-47-output-2.png b/003_data.validation_files/figure-html/cell-47-output-2.png new file mode 100644 index 000000000..486fdcac3 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-47-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-47-output-4.png b/003_data.validation_files/figure-html/cell-47-output-4.png new file mode 100644 index 000000000..1fa85baab Binary files /dev/null and b/003_data.validation_files/figure-html/cell-47-output-4.png differ diff --git a/003_data.validation_files/figure-html/cell-48-output-2.png b/003_data.validation_files/figure-html/cell-48-output-2.png new file mode 100644 index 000000000..ece940a83 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-48-output-2.png differ diff --git a/003_data.validation_files/figure-html/cell-49-output-2.png b/003_data.validation_files/figure-html/cell-49-output-2.png new file mode 100644 index 000000000..0810896e5 Binary files /dev/null and b/003_data.validation_files/figure-html/cell-49-output-2.png differ diff --git a/004_data.preparation_files/figure-html/cell-43-output-1.png b/004_data.preparation_files/figure-html/cell-43-output-1.png new file mode 100644 index 000000000..aaeb746c1 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-43-output-1.png differ diff --git a/004_data.preparation_files/figure-html/cell-43-output-2.png 
b/004_data.preparation_files/figure-html/cell-43-output-2.png new file mode 100644 index 000000000..f5059269a Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-43-output-2.png differ diff --git a/004_data.preparation_files/figure-html/cell-43-output-3.png b/004_data.preparation_files/figure-html/cell-43-output-3.png new file mode 100644 index 000000000..a6e67dba8 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-43-output-3.png differ diff --git a/004_data.preparation_files/figure-html/cell-43-output-4.png b/004_data.preparation_files/figure-html/cell-43-output-4.png new file mode 100644 index 000000000..6a5a5e0ae Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-43-output-4.png differ diff --git a/004_data.preparation_files/figure-html/cell-43-output-5.png b/004_data.preparation_files/figure-html/cell-43-output-5.png new file mode 100644 index 000000000..dfa6e6329 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-43-output-5.png differ diff --git a/004_data.preparation_files/figure-html/cell-43-output-6.png b/004_data.preparation_files/figure-html/cell-43-output-6.png new file mode 100644 index 000000000..54d345867 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-43-output-6.png differ diff --git a/004_data.preparation_files/figure-html/cell-43-output-7.png b/004_data.preparation_files/figure-html/cell-43-output-7.png new file mode 100644 index 000000000..ef1b78ec0 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-43-output-7.png differ diff --git a/004_data.preparation_files/figure-html/cell-43-output-8.png b/004_data.preparation_files/figure-html/cell-43-output-8.png new file mode 100644 index 000000000..fb5e85216 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-43-output-8.png differ diff --git a/004_data.preparation_files/figure-html/cell-44-output-1.png b/004_data.preparation_files/figure-html/cell-44-output-1.png new file mode 100644 index 000000000..aaeb746c1 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-44-output-1.png differ diff --git a/004_data.preparation_files/figure-html/cell-44-output-2.png b/004_data.preparation_files/figure-html/cell-44-output-2.png new file mode 100644 index 000000000..f5059269a Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-44-output-2.png differ diff --git a/004_data.preparation_files/figure-html/cell-44-output-3.png b/004_data.preparation_files/figure-html/cell-44-output-3.png new file mode 100644 index 000000000..a6e67dba8 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-44-output-3.png differ diff --git a/004_data.preparation_files/figure-html/cell-44-output-4.png b/004_data.preparation_files/figure-html/cell-44-output-4.png new file mode 100644 index 000000000..6a5a5e0ae Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-44-output-4.png differ diff --git a/004_data.preparation_files/figure-html/cell-44-output-5.png b/004_data.preparation_files/figure-html/cell-44-output-5.png new file mode 100644 index 000000000..dfa6e6329 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-44-output-5.png differ diff --git a/004_data.preparation_files/figure-html/cell-44-output-6.png b/004_data.preparation_files/figure-html/cell-44-output-6.png new file mode 100644 index 000000000..54d345867 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-44-output-6.png differ 
diff --git a/004_data.preparation_files/figure-html/cell-44-output-7.png b/004_data.preparation_files/figure-html/cell-44-output-7.png new file mode 100644 index 000000000..ef1b78ec0 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-44-output-7.png differ diff --git a/004_data.preparation_files/figure-html/cell-44-output-8.png b/004_data.preparation_files/figure-html/cell-44-output-8.png new file mode 100644 index 000000000..fb5e85216 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-44-output-8.png differ diff --git a/004_data.preparation_files/figure-html/cell-45-output-1.png b/004_data.preparation_files/figure-html/cell-45-output-1.png new file mode 100644 index 000000000..c872922d5 Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-45-output-1.png differ diff --git a/004_data.preparation_files/figure-html/cell-71-output-2.png b/004_data.preparation_files/figure-html/cell-71-output-2.png new file mode 100644 index 000000000..6d676570a Binary files /dev/null and b/004_data.preparation_files/figure-html/cell-71-output-2.png differ diff --git a/005_data.external_files/figure-html/cell-12-output-10.png b/005_data.external_files/figure-html/cell-12-output-10.png new file mode 100644 index 000000000..03e75454b Binary files /dev/null and b/005_data.external_files/figure-html/cell-12-output-10.png differ diff --git a/005_data.external_files/figure-html/cell-12-output-2.png b/005_data.external_files/figure-html/cell-12-output-2.png new file mode 100644 index 000000000..06c5e1424 Binary files /dev/null and b/005_data.external_files/figure-html/cell-12-output-2.png differ diff --git a/005_data.external_files/figure-html/cell-12-output-3.png b/005_data.external_files/figure-html/cell-12-output-3.png new file mode 100644 index 000000000..06c5e1424 Binary files /dev/null and b/005_data.external_files/figure-html/cell-12-output-3.png differ diff --git a/005_data.external_files/figure-html/cell-12-output-4.png b/005_data.external_files/figure-html/cell-12-output-4.png new file mode 100644 index 000000000..06c5e1424 Binary files /dev/null and b/005_data.external_files/figure-html/cell-12-output-4.png differ diff --git a/005_data.external_files/figure-html/cell-12-output-6.png b/005_data.external_files/figure-html/cell-12-output-6.png new file mode 100644 index 000000000..06c5e1424 Binary files /dev/null and b/005_data.external_files/figure-html/cell-12-output-6.png differ diff --git a/005_data.external_files/figure-html/cell-12-output-7.png b/005_data.external_files/figure-html/cell-12-output-7.png new file mode 100644 index 000000000..03e75454b Binary files /dev/null and b/005_data.external_files/figure-html/cell-12-output-7.png differ diff --git a/005_data.external_files/figure-html/cell-12-output-8.png b/005_data.external_files/figure-html/cell-12-output-8.png new file mode 100644 index 000000000..03e75454b Binary files /dev/null and b/005_data.external_files/figure-html/cell-12-output-8.png differ diff --git a/005_data.external_files/figure-html/cell-12-output-9.png b/005_data.external_files/figure-html/cell-12-output-9.png new file mode 100644 index 000000000..03e75454b Binary files /dev/null and b/005_data.external_files/figure-html/cell-12-output-9.png differ diff --git a/006_data.core_files/figure-html/cell-112-output-1.png b/006_data.core_files/figure-html/cell-112-output-1.png new file mode 100644 index 000000000..e8a61b0e9 Binary files /dev/null and b/006_data.core_files/figure-html/cell-112-output-1.png differ diff --git 
a/006_data.core_files/figure-html/cell-112-output-2.png b/006_data.core_files/figure-html/cell-112-output-2.png new file mode 100644 index 000000000..e8a61b0e9 Binary files /dev/null and b/006_data.core_files/figure-html/cell-112-output-2.png differ diff --git a/006_data.core_files/figure-html/cell-117-output-1.png b/006_data.core_files/figure-html/cell-117-output-1.png new file mode 100644 index 000000000..fc730d823 Binary files /dev/null and b/006_data.core_files/figure-html/cell-117-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-117-output-2.png b/006_data.core_files/figure-html/cell-117-output-2.png new file mode 100644 index 000000000..fc730d823 Binary files /dev/null and b/006_data.core_files/figure-html/cell-117-output-2.png differ diff --git a/006_data.core_files/figure-html/cell-118-output-1.png b/006_data.core_files/figure-html/cell-118-output-1.png new file mode 100644 index 000000000..c1846a720 Binary files /dev/null and b/006_data.core_files/figure-html/cell-118-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-118-output-2.png b/006_data.core_files/figure-html/cell-118-output-2.png new file mode 100644 index 000000000..c1846a720 Binary files /dev/null and b/006_data.core_files/figure-html/cell-118-output-2.png differ diff --git a/006_data.core_files/figure-html/cell-119-output-1.png b/006_data.core_files/figure-html/cell-119-output-1.png new file mode 100644 index 000000000..d46cba7f1 Binary files /dev/null and b/006_data.core_files/figure-html/cell-119-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-119-output-2.png b/006_data.core_files/figure-html/cell-119-output-2.png new file mode 100644 index 000000000..d46cba7f1 Binary files /dev/null and b/006_data.core_files/figure-html/cell-119-output-2.png differ diff --git a/006_data.core_files/figure-html/cell-126-output-1.png b/006_data.core_files/figure-html/cell-126-output-1.png new file mode 100644 index 000000000..c939f9c64 Binary files /dev/null and b/006_data.core_files/figure-html/cell-126-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-126-output-2.png b/006_data.core_files/figure-html/cell-126-output-2.png new file mode 100644 index 000000000..8cbca85ee Binary files /dev/null and b/006_data.core_files/figure-html/cell-126-output-2.png differ diff --git a/006_data.core_files/figure-html/cell-20-output-1.png b/006_data.core_files/figure-html/cell-20-output-1.png new file mode 100644 index 000000000..b7d16f21b Binary files /dev/null and b/006_data.core_files/figure-html/cell-20-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-21-output-1.png b/006_data.core_files/figure-html/cell-21-output-1.png new file mode 100644 index 000000000..ae3b7b430 Binary files /dev/null and b/006_data.core_files/figure-html/cell-21-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-22-output-1.png b/006_data.core_files/figure-html/cell-22-output-1.png new file mode 100644 index 000000000..df679a668 Binary files /dev/null and b/006_data.core_files/figure-html/cell-22-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-23-output-1.png b/006_data.core_files/figure-html/cell-23-output-1.png new file mode 100644 index 000000000..865dfc322 Binary files /dev/null and b/006_data.core_files/figure-html/cell-23-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-24-output-1.png b/006_data.core_files/figure-html/cell-24-output-1.png new file mode 100644 index 000000000..865dfc322 Binary files 
/dev/null and b/006_data.core_files/figure-html/cell-24-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-25-output-1.png b/006_data.core_files/figure-html/cell-25-output-1.png new file mode 100644 index 000000000..1a7889669 Binary files /dev/null and b/006_data.core_files/figure-html/cell-25-output-1.png differ diff --git a/006_data.core_files/figure-html/cell-26-output-1.png b/006_data.core_files/figure-html/cell-26-output-1.png new file mode 100644 index 000000000..d3ec99ec7 Binary files /dev/null and b/006_data.core_files/figure-html/cell-26-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-134-output-1.png b/009_data.preprocessing_files/figure-html/cell-134-output-1.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-134-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-134-output-2.png b/009_data.preprocessing_files/figure-html/cell-134-output-2.png new file mode 100644 index 000000000..9a3082ed3 Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-134-output-2.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-135-output-1.png b/009_data.preprocessing_files/figure-html/cell-135-output-1.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-135-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-135-output-2.png b/009_data.preprocessing_files/figure-html/cell-135-output-2.png new file mode 100644 index 000000000..e6442ad77 Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-135-output-2.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-136-output-1.png b/009_data.preprocessing_files/figure-html/cell-136-output-1.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-136-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-136-output-2.png b/009_data.preprocessing_files/figure-html/cell-136-output-2.png new file mode 100644 index 000000000..7a48ab713 Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-136-output-2.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-137-output-1.png b/009_data.preprocessing_files/figure-html/cell-137-output-1.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-137-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-137-output-2.png b/009_data.preprocessing_files/figure-html/cell-137-output-2.png new file mode 100644 index 000000000..b1a094884 Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-137-output-2.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-138-output-1.png b/009_data.preprocessing_files/figure-html/cell-138-output-1.png new file mode 100644 index 000000000..045dca98e Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-138-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-138-output-2.png b/009_data.preprocessing_files/figure-html/cell-138-output-2.png new file mode 100644 index 000000000..0e98fcf7d Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-138-output-2.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-139-output-1.png 
b/009_data.preprocessing_files/figure-html/cell-139-output-1.png new file mode 100644 index 000000000..cf18c230c Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-139-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-139-output-2.png b/009_data.preprocessing_files/figure-html/cell-139-output-2.png new file mode 100644 index 000000000..723e1b3d8 Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-139-output-2.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-139-output-3.png b/009_data.preprocessing_files/figure-html/cell-139-output-3.png new file mode 100644 index 000000000..f4bae705c Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-139-output-3.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-53-output-1.png b/009_data.preprocessing_files/figure-html/cell-53-output-1.png new file mode 100644 index 000000000..ec3817a07 Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-53-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-54-output-1.png b/009_data.preprocessing_files/figure-html/cell-54-output-1.png new file mode 100644 index 000000000..01b45b8e9 Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-54-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-56-output-1.png b/009_data.preprocessing_files/figure-html/cell-56-output-1.png new file mode 100644 index 000000000..94a1d10ae Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-56-output-1.png differ diff --git a/009_data.preprocessing_files/figure-html/cell-57-output-1.png b/009_data.preprocessing_files/figure-html/cell-57-output-1.png new file mode 100644 index 000000000..5780af94c Binary files /dev/null and b/009_data.preprocessing_files/figure-html/cell-57-output-1.png differ diff --git a/010_data.transforms_files/figure-html/cell-9-output-1.png b/010_data.transforms_files/figure-html/cell-9-output-1.png new file mode 100644 index 000000000..d2875db89 Binary files /dev/null and b/010_data.transforms_files/figure-html/cell-9-output-1.png differ diff --git a/012_data.image_files/figure-html/cell-15-output-2.png b/012_data.image_files/figure-html/cell-15-output-2.png new file mode 100644 index 000000000..c8433b962 Binary files /dev/null and b/012_data.image_files/figure-html/cell-15-output-2.png differ diff --git a/012_data.image_files/figure-html/cell-18-output-2.png b/012_data.image_files/figure-html/cell-18-output-2.png new file mode 100644 index 000000000..900f7bdc1 Binary files /dev/null and b/012_data.image_files/figure-html/cell-18-output-2.png differ diff --git a/012_data.image_files/figure-html/cell-18-output-3.png b/012_data.image_files/figure-html/cell-18-output-3.png new file mode 100644 index 000000000..73474f0eb Binary files /dev/null and b/012_data.image_files/figure-html/cell-18-output-3.png differ diff --git a/012_data.image_files/figure-html/cell-18-output-4.png b/012_data.image_files/figure-html/cell-18-output-4.png new file mode 100644 index 000000000..69eca06d4 Binary files /dev/null and b/012_data.image_files/figure-html/cell-18-output-4.png differ diff --git a/012_data.image_files/figure-html/cell-18-output-5.png b/012_data.image_files/figure-html/cell-18-output-5.png new file mode 100644 index 000000000..6545aca98 Binary files /dev/null and b/012_data.image_files/figure-html/cell-18-output-5.png differ diff --git 
a/012_data.image_files/figure-html/cell-18-output-6.png b/012_data.image_files/figure-html/cell-18-output-6.png new file mode 100644 index 000000000..fd244acb2 Binary files /dev/null and b/012_data.image_files/figure-html/cell-18-output-6.png differ diff --git a/012_data.image_files/figure-html/cell-18-output-7.png b/012_data.image_files/figure-html/cell-18-output-7.png new file mode 100644 index 000000000..9647a7c23 Binary files /dev/null and b/012_data.image_files/figure-html/cell-18-output-7.png differ diff --git a/012_data.image_files/figure-html/cell-6-output-2.png b/012_data.image_files/figure-html/cell-6-output-2.png new file mode 100644 index 000000000..4653877d9 Binary files /dev/null and b/012_data.image_files/figure-html/cell-6-output-2.png differ diff --git a/012_data.image_files/figure-html/cell-8-output-2.png b/012_data.image_files/figure-html/cell-8-output-2.png new file mode 100644 index 000000000..c7f61939e Binary files /dev/null and b/012_data.image_files/figure-html/cell-8-output-2.png differ diff --git a/012_data.image_files/figure-html/cell-9-output-2.png b/012_data.image_files/figure-html/cell-9-output-2.png new file mode 100644 index 000000000..b9fb11781 Binary files /dev/null and b/012_data.image_files/figure-html/cell-9-output-2.png differ diff --git a/018_learner_files/figure-html/cell-20-output-1.png b/018_learner_files/figure-html/cell-20-output-1.png new file mode 100644 index 000000000..cfb85d28e Binary files /dev/null and b/018_learner_files/figure-html/cell-20-output-1.png differ diff --git a/018_learner_files/figure-html/cell-22-output-3.png b/018_learner_files/figure-html/cell-22-output-3.png new file mode 100644 index 000000000..5c83702fc Binary files /dev/null and b/018_learner_files/figure-html/cell-22-output-3.png differ diff --git a/020_analysis_files/figure-html/cell-10-output-16.png b/020_analysis_files/figure-html/cell-10-output-16.png new file mode 100644 index 000000000..db499f98a Binary files /dev/null and b/020_analysis_files/figure-html/cell-10-output-16.png differ diff --git a/020_analysis_files/figure-html/cell-11-output-18.png b/020_analysis_files/figure-html/cell-11-output-18.png new file mode 100644 index 000000000..695590144 Binary files /dev/null and b/020_analysis_files/figure-html/cell-11-output-18.png differ diff --git a/020_analysis_files/figure-html/cell-9-output-16.png b/020_analysis_files/figure-html/cell-9-output-16.png new file mode 100644 index 000000000..76fe157d9 Binary files /dev/null and b/020_analysis_files/figure-html/cell-9-output-16.png differ diff --git a/020_analysis_files/figure-html/cell-9-output-17.png b/020_analysis_files/figure-html/cell-9-output-17.png new file mode 100644 index 000000000..e85aa57e8 Binary files /dev/null and b/020_analysis_files/figure-html/cell-9-output-17.png differ diff --git a/020_analysis_files/figure-html/cell-9-output-3.png b/020_analysis_files/figure-html/cell-9-output-3.png new file mode 100644 index 000000000..e3d59d387 Binary files /dev/null and b/020_analysis_files/figure-html/cell-9-output-3.png differ diff --git a/020_analysis_files/figure-html/cell-9-output-6.png b/020_analysis_files/figure-html/cell-9-output-6.png new file mode 100644 index 000000000..bce16c33f Binary files /dev/null and b/020_analysis_files/figure-html/cell-9-output-6.png differ diff --git a/021_calibration_files/figure-html/cell-9-output-2.png b/021_calibration_files/figure-html/cell-9-output-2.png new file mode 100644 index 000000000..03539dd7c Binary files /dev/null and 
b/021_calibration_files/figure-html/cell-9-output-2.png differ diff --git a/022_tslearner_files/figure-html/cell-11-output-2.png b/022_tslearner_files/figure-html/cell-11-output-2.png new file mode 100644 index 000000000..72f2c0999 Binary files /dev/null and b/022_tslearner_files/figure-html/cell-11-output-2.png differ diff --git a/024_callback.core_files/figure-html/cell-10-output-1.png b/024_callback.core_files/figure-html/cell-10-output-1.png new file mode 100644 index 000000000..dd93f388c Binary files /dev/null and b/024_callback.core_files/figure-html/cell-10-output-1.png differ diff --git a/024_callback.core_files/figure-html/cell-10-output-2.png b/024_callback.core_files/figure-html/cell-10-output-2.png new file mode 100644 index 000000000..c7be489fc Binary files /dev/null and b/024_callback.core_files/figure-html/cell-10-output-2.png differ diff --git a/024_callback.core_files/figure-html/cell-10-output-3.png b/024_callback.core_files/figure-html/cell-10-output-3.png new file mode 100644 index 000000000..abea27986 Binary files /dev/null and b/024_callback.core_files/figure-html/cell-10-output-3.png differ diff --git a/024_callback.core_files/figure-html/cell-4-output-1.png b/024_callback.core_files/figure-html/cell-4-output-1.png new file mode 100644 index 000000000..af5d400bf Binary files /dev/null and b/024_callback.core_files/figure-html/cell-4-output-1.png differ diff --git a/024_callback.core_files/figure-html/cell-5-output-1.png b/024_callback.core_files/figure-html/cell-5-output-1.png new file mode 100644 index 000000000..9c3c01199 Binary files /dev/null and b/024_callback.core_files/figure-html/cell-5-output-1.png differ diff --git a/027_callback.MVP_files/figure-html/cell-10-output-1.png b/027_callback.MVP_files/figure-html/cell-10-output-1.png new file mode 100644 index 000000000..e1d59b30e Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-10-output-1.png differ diff --git a/027_callback.MVP_files/figure-html/cell-11-output-1.png b/027_callback.MVP_files/figure-html/cell-11-output-1.png new file mode 100644 index 000000000..94230cd5d Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-11-output-1.png differ diff --git a/027_callback.MVP_files/figure-html/cell-12-output-1.png b/027_callback.MVP_files/figure-html/cell-12-output-1.png new file mode 100644 index 000000000..79b59f7a7 Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-12-output-1.png differ diff --git a/027_callback.MVP_files/figure-html/cell-13-output-1.png b/027_callback.MVP_files/figure-html/cell-13-output-1.png new file mode 100644 index 000000000..89a7ec86d Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-13-output-1.png differ diff --git a/027_callback.MVP_files/figure-html/cell-14-output-1.png b/027_callback.MVP_files/figure-html/cell-14-output-1.png new file mode 100644 index 000000000..76d2da0a6 Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-14-output-1.png differ diff --git a/027_callback.MVP_files/figure-html/cell-15-output-1.png b/027_callback.MVP_files/figure-html/cell-15-output-1.png new file mode 100644 index 000000000..cbf8a6fd2 Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-15-output-1.png differ diff --git a/027_callback.MVP_files/figure-html/cell-22-output-1.png b/027_callback.MVP_files/figure-html/cell-22-output-1.png new file mode 100644 index 000000000..2abb4c6df Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-22-output-1.png differ diff --git 
a/027_callback.MVP_files/figure-html/cell-8-output-1.png b/027_callback.MVP_files/figure-html/cell-8-output-1.png new file mode 100644 index 000000000..8ba28fbcc Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-8-output-1.png differ diff --git a/027_callback.MVP_files/figure-html/cell-8-output-2.png b/027_callback.MVP_files/figure-html/cell-8-output-2.png new file mode 100644 index 000000000..ce7894873 Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-8-output-2.png differ diff --git a/027_callback.MVP_files/figure-html/cell-9-output-1.png b/027_callback.MVP_files/figure-html/cell-9-output-1.png new file mode 100644 index 000000000..e4c2c8db1 Binary files /dev/null and b/027_callback.MVP_files/figure-html/cell-9-output-1.png differ diff --git a/028_callback.PredictionDynamics_files/figure-html/cell-5-output-3.png b/028_callback.PredictionDynamics_files/figure-html/cell-5-output-3.png new file mode 100644 index 000000000..14cb7c3aa Binary files /dev/null and b/028_callback.PredictionDynamics_files/figure-html/cell-5-output-3.png differ diff --git a/029_models.layers_files/figure-html/cell-58-output-1.png b/029_models.layers_files/figure-html/cell-58-output-1.png new file mode 100644 index 000000000..abc5788ec Binary files /dev/null and b/029_models.layers_files/figure-html/cell-58-output-1.png differ diff --git a/031_models.positional_encoders_files/figure-html/cell-3-output-1.png b/031_models.positional_encoders_files/figure-html/cell-3-output-1.png new file mode 100644 index 000000000..3090e140f Binary files /dev/null and b/031_models.positional_encoders_files/figure-html/cell-3-output-1.png differ diff --git a/031_models.positional_encoders_files/figure-html/cell-5-output-1.png b/031_models.positional_encoders_files/figure-html/cell-5-output-1.png new file mode 100644 index 000000000..2a3ff0f86 Binary files /dev/null and b/031_models.positional_encoders_files/figure-html/cell-5-output-1.png differ diff --git a/031_models.positional_encoders_files/figure-html/cell-5-output-2.png b/031_models.positional_encoders_files/figure-html/cell-5-output-2.png new file mode 100644 index 000000000..083a57ec1 Binary files /dev/null and b/031_models.positional_encoders_files/figure-html/cell-5-output-2.png differ diff --git a/031_models.positional_encoders_files/figure-html/cell-5-output-3.png b/031_models.positional_encoders_files/figure-html/cell-5-output-3.png new file mode 100644 index 000000000..7f756cbba Binary files /dev/null and b/031_models.positional_encoders_files/figure-html/cell-5-output-3.png differ diff --git a/031_models.positional_encoders_files/figure-html/cell-7-output-1.png b/031_models.positional_encoders_files/figure-html/cell-7-output-1.png new file mode 100644 index 000000000..08edb3bf5 Binary files /dev/null and b/031_models.positional_encoders_files/figure-html/cell-7-output-1.png differ diff --git a/031_models.positional_encoders_files/figure-html/cell-7-output-2.png b/031_models.positional_encoders_files/figure-html/cell-7-output-2.png new file mode 100644 index 000000000..3231a0ee7 Binary files /dev/null and b/031_models.positional_encoders_files/figure-html/cell-7-output-2.png differ diff --git a/031_models.positional_encoders_files/figure-html/cell-8-output-1.png b/031_models.positional_encoders_files/figure-html/cell-8-output-1.png new file mode 100644 index 000000000..08edb3bf5 Binary files /dev/null and b/031_models.positional_encoders_files/figure-html/cell-8-output-1.png differ diff --git 
a/031_models.positional_encoders_files/figure-html/cell-8-output-2.png b/031_models.positional_encoders_files/figure-html/cell-8-output-2.png new file mode 100644 index 000000000..3231a0ee7 Binary files /dev/null and b/031_models.positional_encoders_files/figure-html/cell-8-output-2.png differ diff --git a/050_models.TSTPlus_files/figure-html/cell-15-output-1.png b/050_models.TSTPlus_files/figure-html/cell-15-output-1.png new file mode 100644 index 000000000..951e9a158 Binary files /dev/null and b/050_models.TSTPlus_files/figure-html/cell-15-output-1.png differ diff --git a/050_models.TSTPlus_files/figure-html/cell-15-output-2.png b/050_models.TSTPlus_files/figure-html/cell-15-output-2.png new file mode 100644 index 000000000..5e45243c2 Binary files /dev/null and b/050_models.TSTPlus_files/figure-html/cell-15-output-2.png differ diff --git a/050_models.TSTPlus_files/figure-html/cell-3-output-1.png b/050_models.TSTPlus_files/figure-html/cell-3-output-1.png new file mode 100644 index 000000000..6ba14e8a6 Binary files /dev/null and b/050_models.TSTPlus_files/figure-html/cell-3-output-1.png differ diff --git a/050b_models.PatchTST_files/figure-html/cell-2-1-image.png b/050b_models.PatchTST_files/figure-html/cell-2-1-image.png new file mode 100644 index 000000000..98e8a59e2 Binary files /dev/null and b/050b_models.PatchTST_files/figure-html/cell-2-1-image.png differ diff --git a/061_models.XCM_files/figure-html/cell-5-output-1.png b/061_models.XCM_files/figure-html/cell-5-output-1.png new file mode 100644 index 000000000..55c03f606 Binary files /dev/null and b/061_models.XCM_files/figure-html/cell-5-output-1.png differ diff --git a/061_models.XCM_files/figure-html/cell-5-output-2.png b/061_models.XCM_files/figure-html/cell-5-output-2.png new file mode 100644 index 000000000..537485935 Binary files /dev/null and b/061_models.XCM_files/figure-html/cell-5-output-2.png differ diff --git a/061_models.XCM_files/figure-html/cell-6-output-2.png b/061_models.XCM_files/figure-html/cell-6-output-2.png new file mode 100644 index 000000000..78fe159d8 Binary files /dev/null and b/061_models.XCM_files/figure-html/cell-6-output-2.png differ diff --git a/061_models.XCM_files/figure-html/cell-6-output-3.png b/061_models.XCM_files/figure-html/cell-6-output-3.png new file mode 100644 index 000000000..d938a85ef Binary files /dev/null and b/061_models.XCM_files/figure-html/cell-6-output-3.png differ diff --git a/062_models.XCMPlus_files/figure-html/cell-5-output-1.png b/062_models.XCMPlus_files/figure-html/cell-5-output-1.png new file mode 100644 index 000000000..151f8048d Binary files /dev/null and b/062_models.XCMPlus_files/figure-html/cell-5-output-1.png differ diff --git a/062_models.XCMPlus_files/figure-html/cell-5-output-2.png b/062_models.XCMPlus_files/figure-html/cell-5-output-2.png new file mode 100644 index 000000000..1d12ab5bf Binary files /dev/null and b/062_models.XCMPlus_files/figure-html/cell-5-output-2.png differ diff --git a/062_models.XCMPlus_files/figure-html/cell-6-output-2.png b/062_models.XCMPlus_files/figure-html/cell-6-output-2.png new file mode 100644 index 000000000..aca08badc Binary files /dev/null and b/062_models.XCMPlus_files/figure-html/cell-6-output-2.png differ diff --git a/062_models.XCMPlus_files/figure-html/cell-6-output-3.png b/062_models.XCMPlus_files/figure-html/cell-6-output-3.png new file mode 100644 index 000000000..b0a3c23bc Binary files /dev/null and b/062_models.XCMPlus_files/figure-html/cell-6-output-3.png differ diff --git 
a/068_models.TSiTPlus_files/figure-html/cell-8-output-1.png b/068_models.TSiTPlus_files/figure-html/cell-8-output-1.png new file mode 100644 index 000000000..742dd51a7 Binary files /dev/null and b/068_models.TSiTPlus_files/figure-html/cell-8-output-1.png differ diff --git a/069_models.TSSequencerPlus_files/figure-html/cell-10-output-1.png b/069_models.TSSequencerPlus_files/figure-html/cell-10-output-1.png new file mode 100644 index 000000000..742dd51a7 Binary files /dev/null and b/069_models.TSSequencerPlus_files/figure-html/cell-10-output-1.png differ diff --git a/analysis.html b/analysis.html new file mode 100644 index 000000000..b87f9e83d --- /dev/null +++ b/analysis.html @@ -0,0 +1,2434 @@
+tsai - Analysis

Analysis


fastai Learner extensions useful to perform prediction analysis.


source


Learner.show_probas

 Learner.show_probas (figsize=(6, 6), ds_idx=1, dl=None, one_batch=False,
+                      max_n=None, nrows:int=1, ncols:int=1, imsize:int=3,
+                      suptitle:str=None, sharex:Union[bool,Literal['none',
+                      'all','row','col']]=False, sharey:Union[bool,Literal
+                      ['none','all','row','col']]=False,
+                      squeeze:bool=True,
+                      width_ratios:Optional[Sequence[float]]=None,
+                      height_ratios:Optional[Sequence[float]]=None,
+                      subplot_kw:Optional[dict[str,Any]]=None,
+                      gridspec_kw:Optional[dict[str,Any]]=None)
Parameter | Type | Default | Details
figsize | tuple | None | Width, height in inches of the returned figure
ds_idx | int | 1 |
dl | NoneType | None |
one_batch | bool | False |
max_n | NoneType | None |
nrows | int | 1 | Number of rows in returned axes grid
ncols | int | 1 | Number of columns in returned axes grid
imsize | int | 3 | Size (in inches) of images that will be displayed in the returned figure
suptitle | str | None | Title to be set to returned figure
sharex | bool or Literal['none', 'all', 'row', 'col'] | False |
sharey | bool or Literal['none', 'all', 'row', 'col'] | False |
squeeze | bool | True |
width_ratios | Sequence[float] or None | None |
height_ratios | Sequence[float] or None | None |
subplot_kw | dict[str, Any] or None | None |
gridspec_kw | dict[str, Any] or None | None |
Returns | (plt.Figure, plt.Axes) | | Returns both fig and ax as a tuple

source


Learner.plot_confusion_matrix

 Learner.plot_confusion_matrix (ds_idx=1, dl=None, thr=0.5,
+                                normalize=False, title='Confusion matrix',
+                                cmap='Blues', norm_dec=2, figsize=(5, 5),
+                                title_fontsize=12, fontsize=10,
+                                plot_txt=True, **kwargs)

Plot the confusion matrix, with title and using cmap.
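For instance, using only arguments shown in the signature above, you can plot a normalized matrix for the validation set with a slightly larger figure:

learn.plot_confusion_matrix(normalize=True, figsize=(7, 7))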


source


Learner.plot_top_losses

 Learner.plot_top_losses (X, y, k:int=9, largest=True, bs:int=64,
+                          **kwargs)
Parameter | Type | Default | Details
X | | | array-like object representing the independent variables
y | | | array-like object representing the target
k | int | 9 | Optional. #items to plot
largest | bool | True | Flag to show largest or smallest losses
bs | int | 64 | batch size
kwargs | | |

source


Learner.top_losses

 Learner.top_losses (X, y, k:int=9, largest=True, bs:int=64)
Parameter | Type | Default | Details
X | | | array-like object representing the independent variables
y | | | array-like object representing the target
k | int | 9 | Optional. #items to plot
largest | bool | True | Flag to show largest or smallest losses
bs | int | 64 | batch size

Permutation importance

+

We’ve also introduced 2 methods to help you better understand how important certain features or certain steps are for your model. Both methods use permutation importance.

+

⚠️ The permutation feature or step importance is defined as the decrease in a model score when a single feature or step value is randomly shuffled.

+

So if you are using accuracy (higher is better), the most important features or steps will be those with a lower value on the chart (as randomly shuffling them reduces performance).

+

The opposite occurs for metrics like mean squared error (lower is better). In this case, the most important features or steps will be those with a higher value on the chart.

+

There are two issues with step importance:

  • there may be many steps, and the analysis could take a very long time
  • steps will likely have a high autocorrelation

For those reasons, we’ve introduced an argument (n_steps) to group steps. In this way you’ll be able to know which part of the time series is the most important.

+

Feature importance has been adapted from https://www.kaggle.com/cdeotte/lstm-feature-importance by Chris Deotte (Kaggle GrandMaster).
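To make the mechanics concrete, here is a minimal sketch of the idea (this is not tsai's implementation; score_fn is a placeholder for any callable that scores predictions, e.g. accuracy, and X is assumed to have shape (samples, variables, steps)):

import numpy as np

def permutation_importance(score_fn, X, y, n_steps=None, random_state=23):
    "Drop in score when one variable (or a group of n_steps steps) is shuffled across samples."
    rng = np.random.default_rng(random_state)
    baseline = score_fn(X, y)
    results = {"BASELINE": baseline}
    if n_steps is None:
        # feature (variable) importance: shuffle one variable at a time
        for v in range(X.shape[1]):
            X_shuf = X.copy()
            X_shuf[:, v, :] = X_shuf[rng.permutation(len(X_shuf)), v, :]
            results[f"var_{v}"] = baseline - score_fn(X_shuf, y)
    else:
        # step importance: shuffle groups of n_steps consecutive steps at a time
        for start in range(0, X.shape[2], n_steps):
            sl = slice(start, start + n_steps)
            X_shuf = X.copy()
            X_shuf[:, :, sl] = X_shuf[rng.permutation(len(X_shuf)), :, sl]
            end = min(start + n_steps, X.shape[2]) - 1
            results[f"steps {start} to {end}"] = baseline - score_fn(X_shuf, y)
    return results

Grouping steps with n_steps keeps the number of evaluations manageable and averages out the effect of highly autocorrelated neighbouring steps.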


source


Learner.feature_importance

 Learner.feature_importance (X=None, y=None, bs:int=None,
+                             partial_n:(<class'int'>,<class'float'>)=None,
+                             method:str='permutation',
+                             feature_names:list=None, sel_classes:(<class'
+                             str'>,<class'list'>)=None,
+                             key_metric_idx:int=0, show_chart:bool=True,
+                             figsize:tuple=None, title:str=None,
+                             return_df:bool=True,
+                             save_df_path:pathlib.Path=None,
+                             random_state:int=23, verbose:bool=True)

Calculates feature importance as the drop in the model’s validation loss or metric when a feature value is randomly shuffled

Parameter | Type | Default | Details
X | NoneType | None | array-like object containing the time series. If None, all data in the validation set will be used.
y | NoneType | None | array-like object containing the targets. If None, all targets in the validation set will be used.
bs | int | None | batch size. If None, the default batch size of the dataloader will be used.
partial_n | int or float | None | # (int) or % (float) of samples used to measure feature importance. If None, all data will be used.
method | str | permutation | Method used to invalidate features. Use 'permutation' for shuffling or 'ablation' for setting values to np.nan.
feature_names | list | None | Optional list of feature names that will be displayed if available. Otherwise var_0, var_1, etc.
sel_classes | str or list | None | classes for which the analysis will be made
key_metric_idx | int | 0 | Optional position of the metric used. If None or no metric is available, the loss will be used.
show_chart | bool | True | Flag to indicate if a chart showing permutation feature importance will be plotted.
figsize | tuple | None | Size of the chart.
title | str | None | Optional string that will be used as the chart title. If None 'Permutation Feature Importance'.
return_df | bool | True | Flag to indicate if the dataframe with feature importance will be returned.
save_df_path | Path | None | Path where the dataframe containing the permutation feature importance results will be saved.
random_state | int | 23 | Optional int that controls the shuffling applied to the data.
verbose | bool | True | Flag that controls verbosity.

source


Learner.step_importance

 Learner.step_importance (X=None, y=None, bs:int=None,
+                          partial_n:(<class'int'>,<class'float'>)=None,
+                          method:str='permutation', step_names:list=None,
+                          sel_classes:(<class'str'>,<class'list'>)=None,
+                          n_steps:int=1, key_metric_idx:int=0,
+                          show_chart:bool=True, figsize:tuple=(10, 5),
+                          title:str=None, xlabel=None,
+                          return_df:bool=True,
+                          save_df_path:pathlib.Path=None,
+                          random_state:int=23, verbose:bool=True)

Calculates step importance as the drop in the model's validation loss or metric when the values of one or more steps are randomly shuffled

Parameter | Type | Default | Details
X | NoneType | None | array-like object containing the time series. If None, all data in the validation set will be used.
y | NoneType | None | array-like object containing the targets. If None, all targets in the validation set will be used.
bs | int | None | batch size used to compute predictions. If None, the batch size used in the validation set will be used.
partial_n | int or float | None | # (int) or % (float) of samples used to measure step importance. If None, all data will be used.
method | str | permutation | Method used to invalidate steps. Use 'permutation' for shuffling or 'ablation' for setting values to np.nan.
step_names | list | None | Optional list of step names that will be displayed if available. Otherwise 0, 1, 2, etc.
sel_classes | str or list | None | classes for which the analysis will be made
n_steps | int | 1 | # of steps that will be analyzed at a time. Default is 1.
key_metric_idx | int | 0 | Optional position of the metric used. If None or no metric is available, the loss will be used.
show_chart | bool | True | Flag to indicate if a chart showing permutation step importance will be plotted.
figsize | tuple | (10, 5) | Size of the chart.
title | str | None | Optional string that will be used as the chart title. If None 'Permutation Feature Importance'.
xlabel | NoneType | None | Optional string that will be used as the chart xlabel. If None 'steps'.
return_df | bool | True | Flag to indicate if the dataframe with step importance will be returned.
save_df_path | Path | None | Path where the dataframe containing the permutation step importance results will be saved.
random_state | int | 23 | Optional int that controls the shuffling applied to the data.
verbose | bool | True | Flag that controls verbosity.
from tsai.data.external import get_UCR_data
+from tsai.data.core import TSClassification, get_ts_dls  # needed for tfms and dataloaders below
+from tsai.data.preprocessing import TSRobustScale, TSStandardize
+from tsai.learner import ts_learner
+from tsai.models.FCNPlus import FCNPlus
+from tsai.metrics import accuracy
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+tfms  = [None, [TSClassification()]]
+batch_tfms = TSRobustScale()
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, splits=splits, sel_vars=[0, 3, 5, 8, 10], sel_steps=slice(-30, None), tfms=tfms, batch_tfms=batch_tfms)
+learn = ts_learner(dls, FCNPlus, metrics=accuracy, train_metrics=True)
+learn.fit_one_cycle(2)
+learn.plot_metrics()
+learn.show_probas()
+learn.plot_confusion_matrix()
+learn.plot_top_losses(X[splits[1]], y[splits[1]], largest=True)
+learn.top_losses(X[splits[1]], y[splits[1]], largest=True)
+
epoch | train_loss | train_accuracy | valid_loss | valid_accuracy | time
0 | 1.792511 | 0.187500 | 1.619460 | 0.216667 | 00:02
1 | 1.592681 | 0.632812 | 1.475991 | 0.250000 | 00:01
[output figures: training metrics plot, class probabilities, confusion matrix, and top losses plots]
(TensorBase([2.3713, 2.3146, 2.2843, 2.2581, 2.2408, 2.2264, 2.2254, 2.2237,
+             2.2230]),
+ [9, 56, 128, 25, 104, 116, 57, 72, 108])
learn.feature_importance()
+
+
X.shape: (180, 24, 51)
+y.shape: (180,)
+Selected metric: accuracy
+Computing feature importance (permutation method)...
+  0 feature: BASELINE             accuracy: 0.277778
+  0 feature: var_0                accuracy: 0.238889
+  3 feature: var_3                accuracy: 0.172222
+  5 feature: var_5                accuracy: 0.261111
+  8 feature: var_8                accuracy: 0.250000
+ 10 feature: var_10               accuracy: 0.266667
[progress bar: 100% 6/6, and permutation feature importance bar chart]
  | Feature | accuracy | accuracy_change
0 | var_3 | 0.172222 | 0.105556
1 | var_0 | 0.238889 | 0.038889
2 | var_8 | 0.250000 | 0.027778
3 | var_5 | 0.261111 | 0.016667
4 | var_10 | 0.266667 | 0.011111
5 | BASELINE | 0.277778 | -0.000000
+ +
+
+
+
+
learn.step_importance(n_steps=5);
+
+
X.shape: (180, 24, 51)
+y.shape: (180,)
+Selected metric: accuracy
+Computing step importance...
+  0 step: BASELINE             accuracy: 0.277778
+  1 step: 21 to 25             accuracy: 0.288889
+  2 step: 26 to 30             accuracy: 0.255556
+  3 step: 31 to 35             accuracy: 0.194444
+  4 step: 36 to 40             accuracy: 0.216667
+  5 step: 41 to 45             accuracy: 0.272222
+  6 step: 46 to 50             accuracy: 0.283333
[progress bar: 100% 7/7, and step importance chart]

You may pass an X and y if you want to analyze a particular group of samples:

+
learn.feature_importance(X=X[splits[1]], y=y[splits[1]])
+

If you have a large validation dataset, you may also use the partial_n argument to select a fixed amount of samples (integer) or a percentage of the validation dataset (float):

+
learn.feature_importance(partial_n=.1)
+
learn.feature_importance(partial_n=100)
\ No newline at end of file
diff --git a/calibration.html b/calibration.html
new file mode 100644
index 000000000..e16c600b9
--- /dev/null
+++ b/calibration.html
@@ -0,0 +1,1361 @@
+tsai - Calibration
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Calibration

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Functionality to calibrate a trained, binary classification model using temperature scaling.

+
+
+

source

+
+

ECELoss

+
+
 ECELoss (n_bins=10)
+
+

Calculates the Expected Calibration Error of a model.
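Expected Calibration Error bins predictions by confidence and averages the gap between accuracy and confidence across bins. The following is a minimal sketch of that computation, written here for illustration only (the function name expected_calibration_error is hypothetical and this is not necessarily tsai's exact implementation):

import torch

def expected_calibration_error(probs, labels, n_bins=10):
    # probs: (n_samples, n_classes) softmax outputs; labels: (n_samples,) integer targets
    confidences, predictions = probs.max(dim=1)
    accuracies = predictions.eq(labels).float()
    bin_edges = torch.linspace(0, 1, n_bins + 1)
    ece = torch.zeros(1)
    for lo, hi in zip(bin_edges[:-1], bin_edges[1:]):
        in_bin = (confidences > lo) & (confidences <= hi)
        prop_in_bin = in_bin.float().mean()
        if prop_in_bin > 0:
            # weight the |accuracy - confidence| gap by the fraction of samples in the bin
            ece += (accuracies[in_bin].mean() - confidences[in_bin].mean()).abs() * prop_in_bin
    return ece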

+
+

source

+
+
+

TemperatureSetter

+
+
 TemperatureSetter (model, lr=0.01, max_iter=1000, line_search_fn=None,
+                    n_bins=10, verbose=True)
+
+

Calibrates a binary classification model by optimizing the temperature parameter

+
+

source

+
+
+

ModelWithTemperature

+
+
 ModelWithTemperature (model)
+
+

A decorator which wraps a model with temperature scaling
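Conceptually, temperature scaling divides the model's logits by a single learned scalar T before the softmax, which softens over-confident probabilities without changing the predicted class. A minimal sketch of such a wrapper is shown below (the class name TemperatureWrapper is made up for illustration; in tsai this role is played by ModelWithTemperature, and TemperatureSetter optimizes the temperature, e.g. by minimizing the NLL on a validation set):

import torch
import torch.nn as nn

class TemperatureWrapper(nn.Module):
    # hypothetical illustration of the idea behind ModelWithTemperature
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.temperature = nn.Parameter(torch.ones(1) * 1.5)  # learned scalar T
    def forward(self, x):
        logits = self.model(x)
        return logits / self.temperature  # same argmax, better calibrated probabilities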

+
+

source

+
+
+

plot_calibration_curve

+
+
 plot_calibration_curve (labels, logits, cal_logits=None, figsize=(6, 6),
+                         n_bins=10, strategy='uniform')
+
+
+

source

+
+
+

Learner.calibrate_model

+
+
 Learner.calibrate_model (X=None, y=None, lr=0.01, max_iter=10000,
+                          line_search_fn=None, n_bins=10,
+                          strategy='uniform', show_plot=True, figsize=(6,
+                          6), verbose=True)
+
+
+
from tsai.basics import *
+from tsai.models.FCNPlus import FCNPlus
+
+
+
X, y, splits = get_UCR_data('FingerMovements', split_data=False)
+tfms  = [None, [TSClassification()]]
+batch_tfms = TSRobustScale()
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+learn = ts_learner(dls, FCNPlus, metrics=accuracy)
+learn.fit_one_cycle(2)
+
epoch | train_loss | valid_loss | accuracy | time
0 | 0.696826 | 0.706016 | 0.430000 | 00:04
1 | 0.690209 | 0.699720 | 0.490000 | 00:03
+
+
+
+
learn.calibrate_model()
+calibrated_model = learn.calibrated_model
+
+
Before temperature - NLL: 0.700, ECE: 0.066
+Calibrating the model...
+...model calibrated
+Optimal temperature: 6.383
+After temperature  - NLL: 0.693, ECE: 0.019
[calibration curve plot]
\ No newline at end of file
diff --git a/callback.core.html b/callback.core.html
new file mode 100644
index 000000000..58935e559
--- /dev/null
+++ b/callback.core.html
@@ -0,0 +1,1476 @@
+tsai - Callback
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Callback

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Miscellaneous callbacks for timeseriesAI.

+
+
+

Events

+

A callback can implement actions on the following events (a minimal example of a custom callback using some of these events is shown below):

• before_fit: called before doing anything, ideal for initial setup.
• before_epoch: called at the beginning of each epoch, useful for any behavior you need to reset at each epoch.
• before_train: called at the beginning of the training part of an epoch.
• before_batch: called at the beginning of each batch, just after drawing said batch. It can be used to do any setup necessary for the batch (like hyper-parameter scheduling) or to change the input/target before it goes into the model (for instance, changing the input with techniques like mixup).
• after_pred: called after computing the output of the model on the batch. It can be used to change that output before it’s fed to the loss.
• after_loss: called after the loss has been computed, but before the backward pass. It can be used to add any penalty to the loss (AR or TAR in RNN training, for instance).
• before_backward: called after the loss has been computed, but only in training mode (i.e. when the backward pass will be used).
• after_backward: called after the backward pass, but before the update of the parameters. It can be used to make any change to the gradients before said update (gradient clipping, for instance).
• after_step: called after the step and before the gradients are zeroed.
• after_batch: called at the end of a batch, for any clean-up before the next one.
• after_train: called at the end of the training phase of an epoch.
• before_validate: called at the beginning of the validation phase of an epoch, useful for any setup needed specifically for validation.
• after_validate: called at the end of the validation part of an epoch.
• after_epoch: called at the end of an epoch, for any clean-up before the next one.
• after_fit: called at the end of training, for final clean-up.
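The sketch below shows a minimal custom callback hooking into a few of these events. It assumes the standard fastai Callback API used by tsai; the class name and printed messages are illustrative only.

from fastai.callback.core import Callback

class PrintStatusCallback(Callback):
    # print basic information at a few points of the training loop
    def before_fit(self):
        print(f"Training for {self.n_epoch} epochs")
    def before_batch(self):
        print(f"x: {self.x.shape}  y: {self.y.shape}")
    def after_epoch(self):
        print(f"Finished epoch {self.epoch}, smooth loss: {self.smooth_loss:.4f}")

# hypothetical usage: learn = ts_learner(dls, FCNPlus, cbs=PrintStatusCallback())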

+
+
+

Learner attributes

+

When writing a callback, the following attributes of Learner are available:

+
    +
  • model: the model used for training/validation
  • +
  • data: the underlying DataLoaders
  • +
  • loss_func: the loss function used
  • +
  • opt: the optimizer used to update the model parameters
  • +
  • opt_func: the function used to create the optimizer
  • +
  • cbs: the list containing all Callbacks
  • +
  • dl: current DataLoader used for iteration
  • +
  • x/xb: last input drawn from self.dl (potentially modified by callbacks). xb is always a tuple (potentially with one element) and x is detuplified. You can only assign to xb.
  • +
  • y/yb: last target drawn from self.dl (potentially modified by callbacks). yb is always a tuple (potentially with one element) and y is detuplified. You can only assign to yb.
  • +
  • pred: last predictions from self.model (potentially modified by callbacks)
  • +
  • loss: last computed loss (potentially modified by callbacks)
  • +
  • n_epoch: the number of epochs in this training
  • +
  • n_iter: the number of iterations in the current self.dl
  • +
  • epoch: the current epoch index (from 0 to n_epoch-1)
  • +
  • iter: the current iteration index in self.dl (from 0 to n_iter-1)
  • +
+

The following attributes are added by TrainEvalCallback and should be available unless you went out of your way to remove that callback:

• train_iter: the number of training iterations done since the beginning of this training
• pct_train: from 0. to 1., the percentage of training iterations completed
• training: flag to indicate if we’re in training mode or not

+

The following attribute is added by Recorder and should be available unless you went out of your way to remove that callback:

• smooth_loss: an exponentially-averaged version of the training loss

+
+
+

Transform scheduler

+
+

source

+
+

TransformScheduler

+
+
 TransformScheduler (schedule_func:callable, show_plot:bool=False)
+
+

A callback to schedule batch transforms during training based on a function (sched_lin, sched_exp, sched_cos (default), etc)

+
+
TransformScheduler(SchedCos(1, 0))
+
+
TransformScheduler(<fastai.callback.schedule._Annealer object>)
+
+
+
+
p = torch.linspace(0.,1,100)
+f = combine_scheds([0.3, 0.4, 0.3], [SchedLin(1.,1.), SchedCos(1.,0.), SchedLin(0.,.0), ])
+plt.plot(p, [f(o) for o in p]);
+
+
+
+

+
+
+
+
+
+
p = torch.linspace(0.,1,100)
+f = combine_scheds([0.3, 0.7], [SchedCos(0.,1.), SchedCos(1.,0.)])
+plt.plot(p, [f(o) for o in p]);
+
+
+
+

+
+
+
+
+
+
+
+

ShowGraph

+
+

source

+
+

ShowGraph

+
+
 ShowGraph (plot_metrics:bool=True, final_losses:bool=True,
+            perc:float=0.5)
+
+

(Modified) Update a graph of training and validation loss

+
+
+
+

SaveModel

+
+

source

+
+

SaveModel

+
+
 SaveModel (monitor='valid_loss', comp=None, min_delta=0.0, fname='model',
+            every_epoch=False, at_end=False, with_opt=False,
+            reset_on_fit=True, verbose=False)
+
+

A TrackerCallback that saves the model’s best during training and loads it at the end with a verbose option.
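A hypothetical usage sketch (the dataset, architecture and file name are illustrative only): track the validation loss during training and reload the best weights at the end.

from tsai.basics import *
from tsai.models.FCNPlus import FCNPlus
from tsai.callback.core import SaveModel

X, y, splits = get_UCR_data('NATOPS', split_data=False)
tfms = [None, TSClassification()]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=TSStandardize())
learn = ts_learner(dls, FCNPlus, metrics=accuracy,
                   cbs=SaveModel(monitor='valid_loss', fname='best_natops', verbose=True))
learn.fit_one_cycle(10)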

+
+
+
+

Weight per sample loss

+

This section shows an example of how the per-sample weights could be calculated. This particular regression method was published in:

+

Yang, Y., Zha, K., Chen, Y. C., Wang, H., & Katabi, D. (2021). Delving into Deep Imbalanced Regression. arXiv preprint arXiv:2102.09554.
+(https://arxiv.org/pdf/2102.09554.pdf)

+
+

source

+
+

prepare_LDS_weights

+
+
 prepare_LDS_weights (labels, n_bins=None, label_range=None,
+                      reweight='inv', lds_kernel='gaussian', lds_ks=9,
+                      lds_sigma=1, max_rel_weight=None, show_plot=True)
+
+
+

source

+
+
+

get_lds_kernel_window

+
+
 get_lds_kernel_window (lds_kernel='gaussian', lds_ks=9, lds_sigma=1)
+
+

Function to determine the label distribution smoothing kernel window

+

• lds_kernel (str): LDS kernel type
• lds_ks (int): LDS kernel size (should be an odd number).
• lds_sigma (float): LDS gaussian/laplace kernel sigma

+
+
labels = np.concatenate([np.random.normal(-20, 1, 10), np.random.normal(0, 2, 100), np.random.normal(12, 2, 300)], -1)
+labels[(-1<labels) & (labels<1)] = 0   # This is done to create some 'gaps' for demo purposes
+labels[(10<labels) & (labels<12)] = 0  # This is done to create some 'gaps' for demo purposes
+
+n_bins = 50
+label_range=None
+reweight = 'inv'
+lds_kernel='gaussian'
+lds_ks=5
+lds_sigma=2
+
+weights_per_sample = prepare_LDS_weights(labels, n_bins, label_range=label_range, reweight=reweight, 
+                                         lds_kernel=lds_kernel, lds_ks=lds_ks, lds_sigma=lds_sigma, show_plot=True)
+
+n_bins = 50
+label_range=None
+reweight = 'sqrt_inv'
+lds_kernel='gaussian'
+lds_ks=5
+lds_sigma=2
+
+weights_per_sample = prepare_LDS_weights(labels, n_bins, label_range=label_range, reweight=reweight, 
+                                         lds_kernel=lds_kernel, lds_ks=lds_ks, lds_sigma=lds_sigma, show_plot=True)
+
+n_bins = None
+label_range=None
+reweight = 'sqrt_inv'
+lds_kernel='triang'
+lds_ks=9
+lds_sigma=1
+
+weights_per_sample = prepare_LDS_weights(labels, n_bins, label_range=label_range, reweight=reweight, 
+                                         lds_kernel=lds_kernel, lds_ks=lds_ks, lds_sigma=lds_sigma, show_plot=True)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+

source

+
+
+

WeightedPerSampleLoss

+
+
 WeightedPerSampleLoss (instance_weights)
+
+

Basic class handling tweaks of the training loop by changing a Learner in various events
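A hypothetical usage sketch: pass one weight per training sample, for example the weights_per_sample array computed above with prepare_LDS_weights, so that each sample's contribution to the loss is re-weighted. The dls and architecture below are placeholders for a regression DataLoaders whose samples are aligned with those weights.

learn = ts_learner(dls, FCNPlus, cbs=WeightedPerSampleLoss(weights_per_sample))
learn.fit_one_cycle(5)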

+
+
+
+

BatchSubsampler

+
+

source

+
+

BatchSubsampler

+
+
 BatchSubsampler (sample_pct:Optional[float]=None,
+                  step_pct:Optional[float]=None, same_seq_len:bool=True,
+                  update_y:bool=False)
+
+

Callback that selects a percentage of samples and/or sequence steps with replacement from each training batch

+
+
+
+

Args:

+

• sample_pct: percentage of random samples (or instances) that will be drawn. If 1., the output batch will contain the same number of samples as the input batch.
• step_pct: percentage of random sequence steps that will be drawn. If 1., the output batch will contain the same number of sequence steps as the input batch. If used with models that don’t use a pooling layer, this must be set to 1 to keep the same dimensions. With CNNs, this value may be different.
• same_seq_len: if True, it ensures that the output has the same shape as the input, even if the step_pct chosen is < 1. Defaults to True.
• update_y: used with step_pct. If True, it applies the same random indices to y. It can only be used with sequential targets.

A hypothetical usage sketch follows this list.
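A hypothetical usage sketch (dls, architecture and percentages are placeholders): draw 75% of the samples and 90% of the sequence steps from each training batch.

learn = ts_learner(dls, FCNPlus, metrics=accuracy,
                   cbs=BatchSubsampler(sample_pct=.75, step_pct=.9))
learn.fit_one_cycle(5)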

+ + +
+ +
+ +
\ No newline at end of file
diff --git a/callback.experimental.html b/callback.experimental.html
new file mode 100644
index 000000000..e5799a189
--- /dev/null
+++ b/callback.experimental.html
@@ -0,0 +1,1474 @@
+tsai - Experimental Callbacks
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Experimental Callbacks

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Miscellaneous experimental callbacks for timeseriesAI.

+
+
+

Gambler’s loss: noisy labels

+
+

source

+
+

gambler_loss

+
+
 gambler_loss (reward=2)
+
+
+

source

+
+
+

GamblersCallback

+
+
 GamblersCallback (after_create=None, before_fit=None, before_epoch=None,
+                   before_train=None, before_batch=None, after_pred=None,
+                   after_loss=None, before_backward=None,
+                   after_cancel_backward=None, after_backward=None,
+                   before_step=None, after_cancel_step=None,
+                   after_step=None, after_cancel_batch=None,
+                   after_batch=None, after_cancel_train=None,
+                   after_train=None, before_validate=None,
+                   after_cancel_validate=None, after_validate=None,
+                   after_cancel_epoch=None, after_epoch=None,
+                   after_cancel_fit=None, after_fit=None)
+
+

A callback to use metrics with gambler’s loss

+
+
from tsai.data.external import *
+from tsai.data.core import *
+from tsai.models.InceptionTime import *
+from tsai.models.layers import *
+from tsai.learner import *
+from fastai.metrics import *
+from tsai.metrics import *
+
+
+
X, y, splits = get_UCR_data('NATOPS', return_split=False)
+tfms = [None, TSCategorize()]
+dsets = TSDatasets(X, y, tfms=tfms, splits=splits)
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[64, 128])
+loss_func = gambler_loss()
+learn = ts_learner(dls, InceptionTime(dls.vars, dls.c + 1), loss_func=loss_func, cbs=GamblersCallback, metrics=[accuracy])
+learn.fit_one_cycle(1)
+
epoch | train_loss | valid_loss | accuracy | time
0 | 1.840055 | 1.945397 | 0.166667 | 00:05
+
+
+
+
+
+

Uncertainty-based data augmentation

+
+

source

+
+

UBDAug

+
+
 UBDAug (batch_tfms:list, N:int=2, C:int=4, S:int=1)
+
+

A callback to implement the uncertainty-based data augmentation.

+
+
from tsai.models.utils import *
+
+
+
X, y, splits = get_UCR_data('NATOPS', return_split=False)
+tfms = [None, TSCategorize()]
+dsets = TSDatasets(X, y, tfms=tfms, splits=splits)
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, batch_tfms=[TSStandardize()])
+model = build_ts_model(InceptionTime, dls=dls)
+TS_tfms = [TSMagScale(.75, p=.5), TSMagWarp(.1, p=0.5),  TSWindowWarp(.25, p=.5), 
+           TSSmooth(p=0.5), TSRandomResizedCrop(.1, p=.5), 
+           TSRandomCropPad(.3, p=0.5), 
+           TSMagAddNoise(.5, p=.5)]
+
+ubda_cb = UBDAug(TS_tfms, N=2, C=4, S=2)
+learn = ts_learner(dls, model, cbs=ubda_cb, metrics=accuracy)
+learn.fit_one_cycle(1)
+
epoch | train_loss | valid_loss | accuracy | time
0 | 1.817080 | 1.791119 | 0.077778 | 00:14
+
+
+
+
+
+

BatchLossFilter

+
+

source

+
+

BatchLossFilter

+
+
 BatchLossFilter (loss_perc=1.0, schedule_func:Optional[callable]=None)
+
+

Callback that selects the hardest samples in every batch, i.e. the samples that together account for a given percentage of the total batch loss
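A hypothetical usage sketch (dls and architecture are placeholders): at each training step, backpropagate only on the hardest samples that together account for roughly 80% of the batch loss.

learn = ts_learner(dls, FCNPlus, metrics=accuracy, cbs=BatchLossFilter(loss_perc=.8))
learn.fit_one_cycle(5)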

+
+
+
+

RandomWeightLossWrapper

+
+

source

+
+

RandomWeightLossWrapper

+
+
 RandomWeightLossWrapper (after_create=None, before_fit=None,
+                          before_epoch=None, before_train=None,
+                          before_batch=None, after_pred=None,
+                          after_loss=None, before_backward=None,
+                          after_cancel_backward=None, after_backward=None,
+                          before_step=None, after_cancel_step=None,
+                          after_step=None, after_cancel_batch=None,
+                          after_batch=None, after_cancel_train=None,
+                          after_train=None, before_validate=None,
+                          after_cancel_validate=None, after_validate=None,
+                          after_cancel_epoch=None, after_epoch=None,
+                          after_cancel_fit=None, after_fit=None)
+
+

Basic class handling tweaks of the training loop by changing a Learner in various events

+
+
+
+

BatchMasker

+
+

source

+
+

BatchMasker

+
+
 BatchMasker (r:float=0.15, lm:int=3, stateful:bool=True, sync:bool=False,
+              subsequence_mask:bool=True, variable_mask:bool=False,
+              future_mask:bool=False, schedule_func:Optional[callable]=None)
+
+

Callback that applies a random mask to each sample in a training batch

+
+
+
+

Args:

+

• r: probability of masking.
• subsequence_mask: apply a mask to random subsequences.
• lm: average mask length when using stateful (geometric) masking.
• stateful: a geometric distribution is applied so that the average mask length is lm.
• sync: all variables have the same masking.
• variable_mask: apply a mask to random variables. Only applicable to multivariate time series.
• future_mask: used to train a forecasting model.
• schedule_func: if a scheduler is passed, it will modify the probability of masking during training.

A hypothetical usage sketch follows this list.
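A hypothetical usage sketch (dls and architecture are placeholders): mask roughly 15% of the values of each training sample, with an average mask length of 5 steps.

learn = ts_learner(dls, FCNPlus, cbs=BatchMasker(r=.15, lm=5))
learn.fit_one_cycle(5)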

+
+
+

SamplerWithReplacement

+
+

source

+
+

SamplerWithReplacement

+
+
 SamplerWithReplacement (after_create=None, before_fit=None,
+                         before_epoch=None, before_train=None,
+                         before_batch=None, after_pred=None,
+                         after_loss=None, before_backward=None,
+                         after_cancel_backward=None, after_backward=None,
+                         before_step=None, after_cancel_step=None,
+                         after_step=None, after_cancel_batch=None,
+                         after_batch=None, after_cancel_train=None,
+                         after_train=None, before_validate=None,
+                         after_cancel_validate=None, after_validate=None,
+                         after_cancel_epoch=None, after_epoch=None,
+                         after_cancel_fit=None, after_fit=None)
+
+

Callback that modifies the sampler to select a percentage of samples and/or sequence steps with replacement from each training batch

+ + +
+
+ +
+ +
\ No newline at end of file
diff --git a/callback.mvp.html b/callback.mvp.html
new file mode 100644
index 000000000..dd6418067
--- /dev/null
+++ b/callback.mvp.html
@@ -0,0 +1,1612 @@
+tsai - MVP (aka TSBERT)
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

MVP (aka TSBERT)

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Self-Supervised Pretraining of Time Series Models

+
+

Masked Value Predictor callback used to predict time series step values after a binary mask has been applied.

+
+

source

+
+

self_mask

+
+
 self_mask (o)
+
+
+

source

+
+
+

create_future_mask

+
+
 create_future_mask (o, r=0.15, sync=False)
+
+
+

source

+
+
+

create_variable_mask

+
+
 create_variable_mask (o, r=0.15)
+
+
+

source

+
+
+

create_subsequence_mask

+
+
 create_subsequence_mask (o, r=0.15, lm=3, stateful=True, sync=False)
+
+
+
t = torch.rand(16, 3, 100)
+mask = create_subsequence_mask(t, sync=False)
+test_eq(mask.shape, t.shape)
+mask = create_subsequence_mask(t, sync=True)
+test_eq(mask.shape, t.shape)
+mask = create_variable_mask(t)
+test_eq(mask.shape, t.shape)
+mask = create_future_mask(t)
+test_eq(mask.shape, t.shape)
+
+
+
o = torch.randn(2, 3, 4)
+o[o>.5] = np.nan
+test_eq(torch.isnan(self_mask(o)).sum(), 0)
+
+
+
t = torch.rand(16, 30, 100)
+mask = create_subsequence_mask(t, r=.15) # default settings
+test_eq(mask.dtype, torch.bool)
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[0], cmap='cool')
+plt.title(f'sample 0 subsequence mask (sync=False) - default mean: {mask[0].float().mean().item():.3f}')
+plt.show()
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[1], cmap='cool')
+plt.title(f'sample 1 subsequence mask (sync=False) - default mean: {mask[1].float().mean().item():.3f}')
+plt.show()
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
t = torch.rand(16, 30, 100)
+mask = create_subsequence_mask(t, r=.5) # 50% of values masked
+test_eq(mask.dtype, torch.bool)
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[0], cmap='cool')
+plt.title(f'sample 0 subsequence mask (r=.5) mean: {mask[0].float().mean().item():.3f}')
+plt.show()
+
+
+
+

+
+
+
+
+
+
t = torch.rand(16, 30, 100)
+mask = create_subsequence_mask(t, lm=5) # average length of mask = 5 
+test_eq(mask.dtype, torch.bool)
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[0], cmap='cool')
+plt.title(f'sample 0 subsequence mask (lm=5) mean: {mask[0].float().mean().item():.3f}')
+plt.show()
+
+
+
+

+
+
+
+
+
+
t = torch.rand(16, 30, 100)
+mask = create_subsequence_mask(t, stateful=False) # individual time steps masked 
+test_eq(mask.dtype, torch.bool)
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[0], cmap='cool')
+plt.title(f'per sample subsequence mask (stateful=False) mean: {mask[0].float().mean().item():.3f}')
+plt.show()
+
+
+
+

+
+
+
+
+
+
t = torch.rand(1, 30, 100)
+mask = create_subsequence_mask(t, sync=True) # all time steps masked simultaneously
+test_eq(mask.dtype, torch.bool)
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[0], cmap='cool')
+plt.title(f'per sample subsequence mask (sync=True) mean: {mask[0].float().mean().item():.3f}')
+plt.show()
+
+
+
+

+
+
+
+
+
+
t = torch.rand(1, 30, 100)
+mask = create_variable_mask(t) # masked variables
+test_eq(mask.dtype, torch.bool)
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[0], cmap='cool')
+plt.title(f'per sample variable mask mean: {mask[0].float().mean().item():.3f}')
+plt.show()
+
+
+
+

+
+
+
+
+
+
t = torch.rand(1, 30, 100)
+mask = create_future_mask(t, r=.15, sync=True) # masked steps
+test_eq(mask.dtype, torch.bool)
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[0], cmap='cool')
+plt.title(f'future mask mean: {mask[0].float().mean().item():.3f}')
+plt.show()
+
+
+
+

+
+
+
+
+
+
t = torch.rand(1, 30, 100)
+mask = create_future_mask(t, r=.15, sync=False) # masked steps
+mask = create_future_mask(t, r=.15, sync=True) # masked steps
+test_eq(mask.dtype, torch.bool)
+plt.figure(figsize=(10, 3))
+plt.pcolormesh(mask[0], cmap='cool')
+plt.title(f'future mask mean: {mask[0].float().mean().item():.3f}')
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

create_mask

+
+
 create_mask (o, r=0.15, lm=3, stateful=True, sync=False,
+              subsequence_mask=True, variable_mask=False,
+              future_mask=False)
+
+
+

source

+
+
+

MVP

+
+
 MVP (r:float=0.15, subsequence_mask:bool=True, lm:float=3.0,
+      stateful:bool=True, sync:bool=False, variable_mask:bool=False,
+      future_mask:bool=False, custom_mask:Optional=None,
+      sel_vars:Optional[list]=None, nan_to_num:int=0,
+      window_size:Optional[tuple]=None, dropout:float=0.1, crit:callable=None,
+      weights_path:Optional[str]=None,
+      target_dir:str='./models/MVP', fname:str='model',
+      save_best:bool=True, verbose:bool=False)
+
+

Basic class handling tweaks of the training loop by changing a Learner in various events

+
+
+

Experiments

+
+
from tsai.data.external import get_UCR_data, check_data
+from tsai.data.preprocessing import TSStandardize, TSNan2Value
+from tsai.data.core import TSCategorize, get_ts_dls
+from tsai.learner import ts_learner
+from tsai.models.InceptionTimePlus import InceptionTimePlus
+
+
+
dsid = 'MoteStrain'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+check_data(X, y, splits, False)
+X[X<-1] = np.nan # This is to test the model works well even if nan values are passed through the dataloaders.
+
+
X      - shape: [1272 samples x 1 features x 84 timesteps]  type: memmap  dtype:float32  isnan: 0
+y      - shape: (1272,)  type: memmap  dtype:<U1  n_classes: 2 (636 samples per class) ['1', '2']  isnan: False
+splits - n_splits: 2 shape: [20, 1252]  overlap: False
+
+
+
+
# Pre-train
+tfms  = [None, [TSCategorize()]]
+batch_tfms = [TSStandardize(by_var=True)]
+unlabeled_dls = get_ts_dls(X, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+learn = ts_learner(unlabeled_dls, InceptionTimePlus, cbs=[MVP(fname=f'{dsid}', window_size=(.5, 1))]) # trained on variable window size
+learn.fit_one_cycle(1, 3e-3)
+
epoch | train_loss | valid_loss | time
0 | 1.270972 | 1.194974 | 00:06
+
+
+
+
learn = ts_learner(unlabeled_dls, InceptionTimePlus, cbs=[MVP(weights_path=f'models/MVP/{dsid}.pth')])
+learn.fit_one_cycle(1, 3e-3)
+
epoch | train_loss | valid_loss | time
0 | 0.837741 | 1.200484 | 00:07
+
+
+
+
learn.MVP.show_preds(sharey=True) # these preds are highly inaccurate as the model's been trained for just 1 epoch for testing purposes
+
+
+
+

+
+
+
+
+
+
# Fine-tune
+tfms  = [None, [TSCategorize()]]
+batch_tfms = [TSStandardize(by_var=True), TSNan2Value()]
+labeled_dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=64)
+learn = ts_learner(labeled_dls, InceptionTimePlus, pretrained=True, weights_path=f'models/MVP/{dsid}.pth', metrics=accuracy)
+learn.fit_one_cycle(1)
+
epoch | train_loss | valid_loss | accuracy | time
0 | 0.773015 | 0.744267 | 0.460863 | 00:09
+
+
+
+
tfms  = [None, [TSCategorize()]]
+batch_tfms = [TSStandardize(by_var=True), TSNan2Value()]
+unlabeled_dls = get_ts_dls(X, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=64)
+fname = f'{dsid}_test'
+mvp = MVP(subsequence_mask=True, sync='random', variable_mask=True, future_mask=True, fname=fname)
+learn = ts_learner(unlabeled_dls, InceptionTimePlus, metrics=accuracy, cbs=mvp) # Metrics will not be used!
+
+
/Users/nacho/opt/anaconda3/envs/py37torch113/lib/python3.7/site-packages/ipykernel_launcher.py:42: UserWarning: Only future_mask will be used
+
+
+
+
tfms  = [None, [TSCategorize()]]
+batch_tfms = [TSStandardize(by_var=True)]
+unlabeled_dls = get_ts_dls(X, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=64)
+fname = f'{dsid}_test'
+mvp = MVP(subsequence_mask=True, sync='random', variable_mask=True, future_mask=True, custom_mask=partial(create_future_mask, r=.15),
+                fname=fname)
+learn = ts_learner(unlabeled_dls, InceptionTimePlus, metrics=accuracy, cbs=mvp) # Metrics will not be used!
+
+
/Users/nacho/opt/anaconda3/envs/py37torch113/lib/python3.7/site-packages/ipykernel_launcher.py:40: UserWarning: Only custom_mask will be used
+
+
+
+
try: os.remove("models/MVP/MoteStrain.pth")
+except OSError: pass
+try: os.remove("models/MVP/model.pth")
+except OSError: pass
+
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/callback.noisy_student.html b/callback.noisy_student.html
new file mode 100644
index 000000000..a994fe702
--- /dev/null
+++ b/callback.noisy_student.html
@@ -0,0 +1,1390 @@
+tsai - Noisy student
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Noisy student

+
+ + + +
+ + + + +
+ + + +
+ + + +

Callback to apply noisy student self-training (a semi-supervised learning approach) based on:

+

Xie, Q., Luong, M. T., Hovy, E., & Le, Q. V. (2020). Self-training with noisy student improves imagenet classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10687-10698).

+
+

source

+
+

NoisyStudent

+
+
 NoisyStudent (dl2:fastai.data.load.DataLoader, bs:Optional[int]=None,
+               l2pl_ratio:int=1, batch_tfms:Optional[list]=None,
+               do_setup:bool=True, pseudolabel_sample_weight:float=1.0,
+               verbose=False)
+
+

A callback to implement the Noisy Student approach. In the original paper this was used in combination with noise:

• stochastic depth: .8
• RandAugment: N=2, M=27
• dropout: .5

+

Steps:

1. Build the dl you will use as a teacher
2. Create dl2 with the pseudolabels (either soft or hard preds)
3. Pass any required batch_tfms to the callback

+
+
from tsai.data.all import *
+from tsai.models.all import *
+from tsai.tslearner import *
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, return_split=False)
+X = X.astype(np.float32)
+
+
+
pseudolabeled_data = X
+soft_preds = True
+
+pseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)
+dsets2 = TSDatasets(pseudolabeled_data, pseudolabels)
+dl2 = TSDataLoader(dsets2, num_workers=0)
+noisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)
+tfms = [None, TSClassification]
+learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=[TSStandardize(), TSRandomSize(.5)], cbs=noisy_student_cb)
+learn.fit_one_cycle(1)
+
+
labels / pseudolabels per training batch              : 171 / 85
+relative labeled/ pseudolabel sample weight in dataset: 4.0
+
+X: torch.Size([171, 24, 51])  X2: torch.Size([85, 24, 51])  X_comb: torch.Size([256, 24, 41])
+y: torch.Size([171])  y2: torch.Size([85])  y_comb: torch.Size([256])
+
+
+ + +
+
epoch | train_loss | valid_loss | accuracy | time
0 | 1.782144 | 1.758471 | 0.250000 | 00:00
+
+
+
+
pseudolabeled_data = X
+soft_preds = False
+
+pseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)
+pseudolabels = pseudolabels.astype(np.float32)
+dsets2 = TSDatasets(pseudolabeled_data, pseudolabels)
+dl2 = TSDataLoader(dsets2, num_workers=0)
+noisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)
+tfms = [None, TSClassification]
+learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=[TSStandardize(), TSRandomSize(.5)], cbs=noisy_student_cb)
+learn.fit_one_cycle(1)
+
+
labels / pseudolabels per training batch              : 171 / 85
+relative labeled/ pseudolabel sample weight in dataset: 4.0
+
+X: torch.Size([171, 24, 51])  X2: torch.Size([85, 24, 51])  X_comb: torch.Size([256, 24, 51])
+y: torch.Size([171, 6])  y2: torch.Size([85, 6])  y_comb: torch.Size([256, 6])
+
+
+ + +
+
epoch | train_loss | valid_loss | accuracy | time
0 | 1.898401 | 1.841182 | 0.155556 | 00:00
+
+
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/callback.predictiondynamics.html b/callback.predictiondynamics.html
new file mode 100644
index 000000000..190d165d8
--- /dev/null
+++ b/callback.predictiondynamics.html
@@ -0,0 +1,1346 @@
+tsai - PredictionDynamics
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

PredictionDynamics

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Callback used to visualize model predictions during training.

+
+

This is an implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on a blog post by Andrej Karpathy I read some time ago that I really liked. One of the things he mentioned was this:

+
+

“visualize prediction dynamics. I like to visualize model predictions on a fixed test batch during the course of training. The “dynamics” of how these predictions move will give you incredibly good intuition for how the training progresses. Many times it is possible to feel the network “struggle” to fit your data if it wiggles too much in some way, revealing instabilities. Very low or very high learning rates are also easily noticeable in the amount of jitter.” A. Karpathy

+
+
+

source

+
+

PredictionDynamics

+
+
 PredictionDynamics (show_perc=1.0, figsize=(10, 6), alpha=0.3, size=30,
+                     color='lime', cmap='gist_rainbow', normalize=False,
+                     sensitivity=None, specificity=None)
+
+

Basic class handling tweaks of the training loop by changing a Learner in various events

+
+
from tsai.basics import *
+from tsai.models.InceptionTime import *
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+check_data(X, y, splits, False)
+
+
X      - shape: [360 samples x 24 features x 51 timesteps]  type: memmap  dtype:float32  isnan: 0
+y      - shape: (360,)  type: memmap  dtype:<U3  n_classes: 6 (60 samples per class) ['1.0', '2.0', '3.0', '4.0', '5.0', '6.0']  isnan: False
+splits - n_splits: 2 shape: [180, 180]  overlap: False
+
+
+
+
tfms  = [None, [Categorize()]]
+batch_tfms = [TSStandardize(by_var=True)]
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+learn = ts_learner(dls, InceptionTime, metrics=accuracy, cbs=PredictionDynamics()) 
+learn.fit_one_cycle(2, 3e-3)
+
epoch | train_loss | valid_loss | accuracy | time
0 | 1.885462 | 1.773872 | 0.238889 | 00:05
1 | 1.425667 | 1.640418 | 0.377778 | 00:05
+
+
+
  | train_loss | valid_loss | accuracy
1 | 1.425667 | 1.640418 | 0.377778
+ +
+
+
+
+
+

+
+
+
+
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/data.core.html b/data.core.html
new file mode 100644
index 000000000..e80d8eed9
--- /dev/null
+++ b/data.core.html
@@ -0,0 +1,3655 @@
+tsai - Data Core
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Data Core

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Main NumPy and Time Series functions used throughout the library.

+
+
+
from tsai.data.external import get_UCR_data
+
+
+
dsid = 'OliveOil'
+X_train, y_train, X_valid, y_valid = get_UCR_data(dsid, on_disk=True, force_download=True)
+X_on_disk, y_on_disk, splits = get_UCR_data(dsid, on_disk=True, return_split=False, force_download=True)
+X_in_memory, y_in_memory, splits = get_UCR_data(dsid, on_disk=False, return_split=False, force_download=True)
+y_tensor = cat2int(y_on_disk)
+y_array = y_tensor.numpy()
+
+
+

source

+
+

ToNumpyTensor

+
+
 ToNumpyTensor (enc=None, dec=None, split_idx=None, order=None)
+
+

Transforms an object into NumpyTensor

+
+

source

+
+
+

NumpyTensor

+
+
 NumpyTensor (o, dtype=None, device=None, copy=None, requires_grad=False,
+              **kwargs)
+
+

Returns a tensor with subclass NumpyTensor that has a show method

+
+

source

+
+
+

TSTensor

+
+
 TSTensor (o, dtype=None, device=None, copy=None, requires_grad=False,
+           **kwargs)
+
+

Returns a tensor with subclass TSTensor that has a show method

+
+

source

+
+
+

show_tuple

+
+
 show_tuple (tup, nrows:int=1, ncols:int=1,
+             sharex:Union[bool,Literal['none','all','row','col']]=False,
+             sharey:Union[bool,Literal['none','all','row','col']]=False,
+             squeeze:bool=True,
+             width_ratios:Optional[Sequence[float]]=None,
+             height_ratios:Optional[Sequence[float]]=None,
+             subplot_kw:Optional[dict[str,Any]]=None,
+             gridspec_kw:Optional[dict[str,Any]]=None)
+
+

Display a timeseries plot from a decoded tuple

tup
nrows (int, default=1)
ncols (int, default=1)
sharex (bool | Literal['none', 'all', 'row', 'col'], default=False)
sharey (bool | Literal['none', 'all', 'row', 'col'], default=False)
squeeze (bool, default=True): If True, extra dimensions are squeezed out from the returned array of ~matplotlib.axes.Axes: if only one subplot is constructed (nrows=ncols=1), the resulting single Axes object is returned as a scalar; for Nx1 or 1xM subplots, the returned object is a 1D numpy object array of Axes objects; for NxM subplots with N>1 and M>1, a 2D array is returned. If False, no squeezing at all is done: the returned Axes object is always a 2D array containing Axes instances, even if it ends up being 1x1.
width_ratios (Sequence[float] | None, default=None): Defines the relative widths of the columns. Each column gets a relative width of width_ratios[i] / sum(width_ratios). If not given, all columns will have the same width. Equivalent to gridspec_kw={'width_ratios': [...]}.
height_ratios (Sequence[float] | None, default=None): Defines the relative heights of the rows. Each row gets a relative height of height_ratios[i] / sum(height_ratios). If not given, all rows will have the same height. Convenience for gridspec_kw={'height_ratios': [...]}.
subplot_kw (dict[str, Any] | None, default=None): Dict with keywords passed to the ~matplotlib.figure.Figure.add_subplot call used to create each subplot.
gridspec_kw (dict[str, Any] | None, default=None): Dict with keywords passed to the ~matplotlib.gridspec.GridSpec constructor used to create the grid the subplots are placed on.
+
+

source

+
+
+

ToTSTensor

+
+
 ToTSTensor (enc=None, dec=None, split_idx=None, order=None)
+
+

Transforms an object into TSTensor

+
+
a = np.random.randn(2, 3, 4).astype(np.float16)
+assert np.shares_memory(a, NumpyTensor(a))
+assert np.shares_memory(a, TSTensor(a))
+
+
+
a = np.random.randn(2, 3, 4).astype(np.float32)
+assert np.shares_memory(a, NumpyTensor(a))
+assert np.shares_memory(a, TSTensor(a))
+
+
+
a = np.random.randint(10, size=10).astype(np.int64)
+assert np.shares_memory(a, NumpyTensor(a))
+assert np.shares_memory(a, TSTensor(a))
+
+
+
a = np.random.randint(10, size=10).astype(np.int32)
+assert np.shares_memory(a, NumpyTensor(a))
+assert np.shares_memory(a, TSTensor(a))
+
+
+
a = torch.rand(2, 3, 4).float()
+assert np.shares_memory(a, NumpyTensor(a))
+assert np.shares_memory(a, TSTensor(a))
+
+
+
a = torch.randint(3, (10,))
+assert np.shares_memory(a, NumpyTensor(a))
+assert np.shares_memory(a, TSTensor(a))
+
+
+
t = TSTensor(torch.randn(2, 3, 4))
+p = torch.tensor(3., requires_grad=True)
+test = torch.add(t, p)
+test_eq(test.requires_grad, True)
+test_eq(type(t.data), torch.Tensor)
+test_eq(type(t), TSTensor)
+
+
+
l = L([0,1,2,3], [4,5,6,7], [8, 9, 10, 11])
+TSTensor(l), TSTensor(l).data
+
+
(TSTensor(vars:3, len:4, device=cpu, dtype=torch.int64),
+ tensor([[ 0,  1,  2,  3],
+         [ 4,  5,  6,  7],
+         [ 8,  9, 10, 11]]))
+
+
+
+
t = TSTensor(X_train)
+for i in range(4):
+    print(t, t.ndim, torch.is_tensor(t))
+    if i < 3: t = t[0]
+
+
TSTensor(samples:30, vars:1, len:570, device=cpu, dtype=torch.float32) 3 True
+TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32) 2 True
+TSTensor(len:570, device=cpu, dtype=torch.float32) 1 True
+TSTensor([-0.6113752722740173], device=cpu, dtype=torch.float32) 0 True
+
+
+
+
TSTensor(X_on_disk)
+
+
TSTensor(samples:60, vars:1, len:570, device=cpu, dtype=torch.float32)
+
+
+
+
ToTSTensor()(X_on_disk)
+
+
TSTensor(samples:60, vars:1, len:570, device=cpu, dtype=torch.float32)
+
+
+
+
TSTensor(X_train).show();
+
+
+
+

+
+
+
+
+
+
TSTensor(X_train).show(title='1');
+
+
+
+

+
+
+
+
+
+
show_tuple((TSTensor(X_train), ['1', '2']))
+
+
+
+

+
+
+
+
+
+
show_tuple((TSTensor(np.arange(10).reshape(2,5)), 1))
+
+
+
+

+
+
+
+
+
+
show_tuple((TSTensor(np.arange(10).reshape(2,5)), '1'))
+
+
+
+

+
+
+
+
+
+
show_tuple((TSTensor(np.arange(10).reshape(2,5)), [1,2]))
+
+
+
+

+
+
+
+
+
+
show_tuple((TSTensor(np.arange(10).reshape(2,5)), ['1', '2']))
+
+
+
+

+
+
+
+
+
+

source

+
+
+

TSMaskTensor

+
+
 TSMaskTensor (o, dtype=None, device=None, copy=None, requires_grad=False,
+               **kwargs)
+
+

Returns a tensor with subclass NumpyTensor that has a show method

+
+

source

+
+
+

TSLabelTensor

+
+
 TSLabelTensor (o, dtype=None, device=None, copy=None,
+                requires_grad=False, **kwargs)
+
+

Returns a tensor with subclass NumpyTensor that has a show method

+
+
t = TSLabelTensor(torch.randint(0,10,(1, 2, 3)))
+t, t[0], t[0][0], t[0][0][0]
+
+
(TSLabelTensor(shape:(1, 2, 3), device=cpu, dtype=torch.int64),
+ TSLabelTensor(shape:(2, 3), device=cpu, dtype=torch.int64),
+ TSLabelTensor(shape:(3,), device=cpu, dtype=torch.int64),
+ 7)
+
+
+
+
t = TSMaskTensor(torch.randint(0,10,(1, 2, 3)))
+t, t[0], t[0][0], t[0][0][0]
+
+
(TSMaskTensor(shape:(1, 2, 3), device=cpu, dtype=torch.int64),
+ TSMaskTensor(shape:(2, 3), device=cpu, dtype=torch.int64),
+ TSMaskTensor(shape:(3,), device=cpu, dtype=torch.int64),
+ 1)
+
+
+
+

source

+
+
+

TSClassification

+
+
 TSClassification (vocab=None, sort=True)
+
+

Vectorized, reversible transform of category string to vocab id

+
+

source

+
+
+

ToInt

+
+
 ToInt (enc=None, dec=None, split_idx=None, order=None)
+
+

Transforms an object dtype to int

+
+

source

+
+
+

ToFloat

+
+
 ToFloat (enc=None, dec=None, split_idx=None, order=None)
+
+

Transforms an object dtype to float (vectorized)

+
+
a = np.random.randint(0, 2, 10)
+b = np.array(['1', '2', '3'])
+c = np.array(['1.0', '2.0', '3.0'])
+t = torch.randint(0, 2, (10, ))
+test_eq(ToFloat()(a).dtype, 'float32')
+test_eq(ToFloat()(b).dtype, 'float32')
+test_eq(ToFloat()(c).dtype, 'float32')
+test_eq(ToFloat()(t).dtype, torch.float32)
+
+
+
a = np.random.rand(10)*10
+b = np.array(['1.0', '2.0', '3.0'])
+t = torch.rand(10)*10
+test_eq(ToInt()(a).dtype, 'int64')
+test_eq(ToInt()(b).dtype, 'int64')
+test_eq(ToInt()(t).dtype, torch.long)
+
+
+
t = TSClassification()
+t.setup(y_on_disk[splits[0]])
+y_encoded = t(y_on_disk)
+print(y_encoded)
+test_eq(t.decodes(y_encoded), y_on_disk)
+
+
TensorCategory([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3,
+                3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
+                1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3])
+
+
+
+
y_multi= np.random.randint(0,3,20)
+y_multi = np.asarray(alphabet[y_multi]).reshape(4,5)
+tfm = TSClassification()
+tfm.setup(y_multi)
+enc_y_multi = tfm(y_multi)
+test_eq(y_multi, tfm.decode(enc_y_multi))
+enc_y_multi
+
+
TensorCategory([[0, 1, 1, 1, 2],
+                [0, 1, 2, 1, 0],
+                [2, 1, 0, 1, 2],
+                [0, 2, 0, 2, 2]])
+
+
+
+

source

+
+
+

TSMultiLabelClassification

+
+
 TSMultiLabelClassification (c=None, vocab=None, add_na=False, sort=True)
+
+

Reversible combined transform of multi-category strings to one-hot encoded vocab id
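A hypothetical usage sketch, assuming the same setup/encode/decode pattern shown for TSClassification above (the toy labels are made up, and the exact accepted input formats may differ):

y_multilabel = [['a', 'b'], ['a'], ['b', 'c'], ['c']]
tfm = TSMultiLabelClassification()
tfm.setup(y_multilabel)
enc = tfm(y_multilabel)       # one-hot encoded targets, one column per class in the vocab
dec = tfm.decode(enc)         # back to the original multi-label strings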

+
+

source

+
+
+

TSTensorBlock

+
+
 TSTensorBlock (type_tfms=None, item_tfms=None, batch_tfms=None,
+                dl_type=None, dls_kwargs=None)
+
+

Initialize self. See help(type(self)) for accurate signature.

+
+

source

+
+
+

NumpyTensorBlock

+
+
 NumpyTensorBlock (type_tfms=None, item_tfms=None, batch_tfms=None,
+                   dl_type=None, dls_kwargs=None)
+
+

Initialize self. See help(type(self)) for accurate signature.

+
+
test_eq(NumpyTensorBlock().item_tfms[0].__name__, 'ToNumpyTensor')
+test_eq(TSTensorBlock().item_tfms[0].__name__, 'ToTSTensor')
+
+
+

source

+
+
+

TSDataset

+
+
 TSDataset (X, y=None, split=None, sel_vars=None, sel_steps=None,
+            types=None, dtype=None, device=None)
+
+

Initialize self. See help(type(self)) for accurate signature.

+
+

source

+
+
+

NumpyDataset

+
+
 NumpyDataset (X, y=None, types=None)
+
+

Initialize self. See help(type(self)) for accurate signature.

+
+

source

+
+
+

TorchDataset

+
+
 TorchDataset (X, y=None)
+
+

Initialize self. See help(type(self)) for accurate signature.

+
+
a = np.random.rand(5,6,7)
+b = np.random.rand(5)
+ds = NumpyDataset(a,b)
+xb, yb = ds[[0,4]]
+test_eq(xb.shape, (2,6,7))
+test_eq(yb.shape, (2,))
+
+
+

source

+
+
+

TSTfmdLists

+
+
 TSTfmdLists (items=None, *rest, use_list=False, match=None)
+
+

A Pipeline of tfms applied to a collection of items

items (list): Items to apply Transforms to
use_list (bool, default=None): Use list in L
+
+

source

+
+
+

NoTfmLists

+
+
 NoTfmLists (items=None, *rest, use_list=False, match=None)
+
+

A Pipeline of tfms applied to a collection of items

+
+
items = X_on_disk
+tl = TfmdLists(items, tfms=None, splits=splits)
+test_eq(len(tl), len(X_on_disk))
+test_eq(len(tl.train), len(splits[0]))
+test_eq(len(tl.valid), len(splits[1]))
+test_eq(tl[[0,4,7]], X_on_disk[[0,4,7]])
+test_eq(tl.train[[0,4,7]], X_on_disk[splits[0][0,4,7]])
+test_eq(tl.valid[[0,4,7]], X_on_disk[splits[1][0,4,7]])
+test_eq(tl[0], items[0])
+test_eq(tl[[0,1]], items[[0,1]])
+test_eq(tl.decode(tl[0]), tl[0])
+test_eq((tl.split_idx, tl.train.split_idx, tl.valid.split_idx), (None, 0, 1))
+
+
+
items = X_on_disk
+tl = TSTfmdLists(items, tfms=None, splits=splits)
+test_eq(len(tl), len(X_on_disk))
+test_eq(len(tl.train), len(splits[0]))
+test_eq(len(tl.valid), len(splits[1]))
+test_eq(tl[[0,4,7]], X_on_disk[[0,4,7]])
+test_eq(tl.train[[0,4,7]], X_on_disk[splits[0][0,4,7]])
+test_eq(tl.valid[[0,4,7]], X_on_disk[splits[1][0,4,7]])
+test_eq(tl[0], items[0])
+test_eq(tl[[0,1]], items[[0,1]])
+test_eq(tl.decode(tl[0]), tl[0])
+test_eq((tl.split_idx, tl.train.split_idx, tl.valid.split_idx), (None, 0, 1))
+
+
+
items = X_on_disk
+ntl = NoTfmLists(items, splits=splits)
+test_eq(len(ntl), len(X_on_disk))
+test_eq(len(ntl.train), len(splits[0]))
+test_eq(len(ntl.valid), len(splits[1]))
+test_eq(ntl._splits, np.arange(len(X_on_disk)))
+test_eq(ntl.train._splits, np.arange(len(splits[0])))
+test_eq(ntl.valid._splits, np.arange(len(splits[0]), len(X_on_disk)))
+print(ntl)
+print(ntl.train)
+print(ntl.valid)
+test_eq(ntl[[0,4,7]], X_on_disk[[0,4,7]])
+test_eq(ntl.train[[0,4,7]], X_on_disk[splits[0][0,4,7]])
+test_eq(ntl.valid[[0,4,7]], X_on_disk[splits[1][0,4,7]])
+test_eq(ntl[0], items[0])
+test_eq(ntl[[0,1]], items[[0,1]])
+test_eq(ntl[:], X_on_disk)
+ntl[0].shape, stack(ntl[[0,1]]).shape
+test_eq(ntl.decode(ntl[0]), ntl[0])
+assert id(items) == id(ntl.items) == id(ntl.train.items) == id(ntl.valid.items)
+test_eq((ntl.split_idx, ntl.train.split_idx, ntl.valid.split_idx), (None, 0, 1))
+
+
NoTfmLists: memmap(60, 1, 570)
+NoTfmLists: memmap(30, 1, 570)
+NoTfmLists: memmap(30, 1, 570)
+
+
+
+
subitems = X_on_disk
+new_ntl = ntl._new(X_on_disk)
+test_eq(new_ntl[:], X_on_disk)
+
+
+
idxs = random_choice(len(X_on_disk), 10, False)
+new_ntl = ntl._new(X_on_disk[idxs])
+test_eq(new_ntl[:], X_on_disk[idxs])
+
+
+
idxs = random_choice(len(X_on_disk), 10, False)
+new_ntl = ntl.valid._new(X_on_disk[idxs])
+test_eq(new_ntl[:], X_on_disk[idxs])
+
+
+

source

+
+
+

tscoll_repr

+
+
 tscoll_repr (c, max_n=10)
+
+

String repr of up to max_n items of (possibly lazy) collection c

+
+

source

+
+
+

NumpyDatasets

+
+
 NumpyDatasets (items:list=None, tfms:MutableSequence|Pipeline=None,
+                tls:TfmdLists=None, n_inp:int=None, dl_type=None,
+                use_list:bool=None, do_setup:bool=True,
+                split_idx:int=None, train_setup:bool=True,
+                splits:list=None, types=None, verbose:bool=False)
+
+

A dataset that creates tuples from X (and y) and applies tfms of type item_tfms

items (list): Items to apply Transforms to
tfms (MutableSequence | Pipeline): Transform(s) or Pipeline to apply
tls (NoneType, default=None)
n_inp (NoneType, default=None)
dl_type (TfmdDL, default=None): Type of DataLoader
use_list (bool, default=None): Use list in L
do_setup (bool, default=True): Call setup() for Transform
split_idx (int, default=None): Apply Transform(s) to training or validation set. 0 for training set and 1 for validation set
train_setup (bool, default=True): Apply Transform(s) only on training DataLoader
splits (list, default=None): Indices for training and validation sets
types (NoneType, default=None): Types of data in items
verbose (bool, default=False): Print verbose output
+
+

source

+
+
+

TSDatasets

+
+
 TSDatasets (items:list=None, tfms:MutableSequence|Pipeline=None,
+             tls:TfmdLists=None, n_inp:int=None, dl_type=None,
+             use_list:bool=None, do_setup:bool=True, split_idx:int=None,
+             train_setup:bool=True, splits:list=None, types=None,
+             verbose:bool=False)
+
+

A dataset that creates tuples from X (and optionally y) and applies item_tfms

items (list): Items to apply Transforms to
tfms (MutableSequence | Pipeline): Transform(s) or Pipeline to apply
tls (NoneType, default=None)
n_inp (NoneType, default=None)
dl_type (TfmdDL, default=None): Type of DataLoader
use_list (bool, default=None): Use list in L
do_setup (bool, default=True): Call setup() for Transform
split_idx (int, default=None): Apply Transform(s) to training or validation set. 0 for training set and 1 for validation set
train_setup (bool, default=True): Apply Transform(s) only on training DataLoader
splits (list, default=None): Indices for training and validation sets
types (NoneType, default=None): Types of data in items
verbose (bool, default=False): Print verbose output
+
+
dsets = TSDatasets(X_on_disk, y_on_disk, splits=splits, tfms=[None, TSClassification()], inplace=True)
+i = random_choice(len(splits[0]), 10, False).tolist()
+test_eq(dsets.subset(i), dsets.train.subset(i))
+dsets.valid.subset(i)
+dsets.valid.subset(i)[[0,6,8]]
+test_eq(dsets.subset(i)[[0,6,8]], dsets.train.subset(i)[[0,6,8]])
+dsets.subset([0,7,3])
+dsets.subset(i), dsets.train.subset(i), dsets.valid.subset(i)
+
+
((#10) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(2)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3))] ...],
+ (#10) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(2)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3))] ...],
+ (#10) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(2)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3))] ...])
+
+
+
+
tfms = [None, TSClassification()]
+dsets = TSDatasets(X_on_disk, y_on_disk, splits=splits, tfms=tfms, inplace=False)
+assert id(X_on_disk) == id(dsets.ptls[0].items) == id(dsets.train.ptls[0].items) == id(dsets.valid.ptls[0].items)
+
+tfms = None
+dsets = TSDatasets(X_on_disk, splits=splits, tfms=tfms, inplace=False)
+assert id(X_on_disk) == id(dsets.ptls[0].items) == id(dsets.train.ptls[0].items) == id(dsets.valid.ptls[0].items)
+
+
+

source

+
+
+

TSDatasets.add_unlabeled

+
+
 TSDatasets.add_unlabeled (X, inplace=True)
+
+
+

source

+
+
+

TSDatasets.add_test

+
+
 TSDatasets.add_test (X, y=None, inplace=True)
+
+
+

source

+
+
+

TSDatasets.add_dataset

+
+
 TSDatasets.add_dataset (X, y=None, inplace=True)
+
+
+

source

+
+
+

NumpyDatasets.add_unlabeled

+
+
 NumpyDatasets.add_unlabeled (X, inplace=True)
+
+
+

source

+
+
+

NumpyDatasets.add_test

+
+
 NumpyDatasets.add_test (X, y=None, inplace=True)
+
+
+

source

+
+
+

NumpyDatasets.add_dataset

+
+
 NumpyDatasets.add_dataset (X, y=None, inplace=True)
+
+
+

source

+
+
+

add_ds

+
+
 add_ds (dsets, X, y=None, inplace=True)
+
+

Create test datasets from X (and y) using validation transforms of dsets
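A hypothetical usage sketch, reusing the X_on_disk / y_on_disk arrays and the dsets object created earlier on this page: the new datasets are built with the validation transforms of dsets.

X_new, y_new = X_on_disk[:10], y_on_disk[:10]
test_ds = dsets.add_test(X_new, y_new)       # labeled test set
unlabeled_ds = dsets.add_unlabeled(X_new)    # unlabeled data, no targets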

+
+
dsets = TSDatasets(X_on_disk, y_on_disk, splits=splits, tfms=[None, TSClassification()], inplace=True)
+print(dsets.train[0][0].shape, dsets.train[[0,1]][0].shape)
+print(dsets.split_idx, dsets.train.split_idx, dsets.valid.split_idx)
+print(dsets.new_empty())
+dsets
+
+
torch.Size([1, 570]) torch.Size([2, 1, 570])
+None 0 1
+(#0) []
+
+
+
(#60) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1))] ...]
+
+
+
+
dsets = TSDatasets(X_on_disk, y_on_disk, splits=splits, tfms=[None, TSClassification()], inplace=False)
+print(dsets.train[0][0].shape, dsets.train[[0,1]][0].shape)
+print(dsets.split_idx, dsets.train.split_idx, dsets.valid.split_idx)
+print(dsets.new_empty())
+dsets
+
+
torch.Size([1, 570]) torch.Size([2, 1, 570])
+None 0 1
+(#0) []
+
+
+
(#60) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1]))] ...]
+
+
+
+
dsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSClassification()], splits=splits, inplace=True)
+
+idxs = random_choice(len(dsets), 10, False)
+test_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])
+test_eq(dsets[idxs][1].numpy(), y_array[idxs])
+
+idxs = random_choice(len(dsets.train), 10, False)
+test_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])
+test_eq(dsets.train[idxs][1].numpy(), y_array[splits[0][idxs]])
+
+idxs = random_choice(len(dsets.valid), 10, False)
+test_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])
+test_eq(dsets.valid[idxs][1].numpy(), y_array[splits[1][idxs]])
+
+
+
dsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSClassification()], splits=splits, inplace=False)
+assert id(X_on_disk) == id(dsets.tls[0].items) == id(dsets.ptls[0].items)
+assert id(X_on_disk) == id(dsets.train.tls[0].items) == id(dsets.train.ptls[0].items)
+assert id(X_on_disk) == id(dsets.valid.tls[0].items) == id(dsets.valid.ptls[0].items)
+
+idxs = random_choice(len(dsets), 10, False)
+test_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])
+test_eq(dsets[idxs][1].numpy(), y_array[idxs])
+
+
+idxs = random_choice(len(dsets.train), 10, False)
+test_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])
+test_eq(dsets.train[idxs][1].numpy(), y_array[splits[0][idxs]])
+
+idxs = random_choice(len(dsets.valid), 10, False)
+test_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])
+test_eq(dsets.valid[idxs][1].numpy(), y_array[splits[1][idxs]])
+
+
+
dsets = TSDatasets(X_on_disk, splits=splits, inplace=True)
+
+idxs = random_choice(len(dsets), 10, False)
+test_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])
+
+idxs = random_choice(len(dsets.train), 10, False)
+test_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])
+
+idxs = random_choice(len(dsets.valid), 10, False)
+test_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])
+
+
+
dsets = TSDatasets(X_on_disk, splits=splits, inplace=False)
+assert np.shares_memory(X_on_disk, dsets.tls[0].items)
+assert np.shares_memory(X_on_disk, dsets.ptls[0].items)
+assert np.shares_memory(X_on_disk, dsets.train.tls[0].items)
+assert np.shares_memory(X_on_disk, dsets.train.ptls[0].items)
+assert np.shares_memory(X_on_disk, dsets.valid.tls[0].items)
+assert np.shares_memory(X_on_disk, dsets.valid.ptls[0].items)
+
+idxs = random_choice(len(dsets), 10, False)
+test_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])
+
+idxs = random_choice(len(dsets.train), 10, False)
+test_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])
+
+idxs = random_choice(len(dsets.valid), 10, False)
+test_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])
+
+
+
dsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits, inplace=True)
+
+idxs = random_choice(len(dsets), 10, False)
+test_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])
+test_eq(dsets[idxs][1].numpy(), y_array[idxs])
+
+idxs = random_choice(len(dsets.train), 10, False)
+test_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])
+test_eq(dsets.train[idxs][1].numpy(), y_array[splits[0][idxs]])
+
+idxs = random_choice(len(dsets.valid), 10, False)
+test_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])
+test_eq(dsets.valid[idxs][1].numpy(), y_array[splits[1][idxs]])
+
+
+
dsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits, inplace=False)
+assert np.shares_memory(X_on_disk, dsets.tls[0].items)
+assert np.shares_memory(X_on_disk, dsets.ptls[0].items)
+assert np.shares_memory(X_on_disk, dsets.train.tls[0].items)
+assert np.shares_memory(X_on_disk, dsets.train.ptls[0].items)
+assert np.shares_memory(X_on_disk, dsets.valid.tls[0].items)
+assert np.shares_memory(X_on_disk, dsets.valid.ptls[0].items)
+
+idxs = random_choice(len(dsets), 10, False)
+test_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])
+test_eq(dsets[idxs][1].numpy(), y_array[idxs])
+
+idxs = random_choice(len(dsets.train), 10, False)
+test_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])
+test_eq(dsets.train[idxs][1].numpy(), y_array[splits[0][idxs]])
+
+idxs = random_choice(len(dsets.valid), 10, False)
+test_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])
+test_eq(dsets.valid[idxs][1].numpy(), y_array[splits[1][idxs]])
+
+
+
dsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSClassification()], splits=None, inplace=True)
+
+idxs = random_choice(len(dsets), 10, False)
+test_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])
+test_eq(dsets[idxs][1].numpy(), y_array[idxs])
+
+
+
dsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSClassification()], splits=None, inplace=False)
+assert id(X_on_disk) == id(dsets.tls[0].items) == id(dsets.ptls[0].items)
+assert id(X_on_disk) == id(dsets.train.tls[0].items) == id(dsets.train.ptls[0].items)
+
+idxs = random_choice(len(dsets), 10, False)
+test_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])
+test_eq(dsets[idxs][1].numpy(), y_array[idxs])
+
+
+
dsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)
+test_eq(dsets.train[0:10], dsets.add_dataset(X_on_disk[0:10], y_array[0:10])[:])
+test_eq(dsets.train[0:10][0], dsets.add_dataset(X_on_disk[0:10])[:][0])
+
+
+
dsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)
+torch.save(dsets, 'export/dsets.pth')
+del dsets
+dsets = torch.load('export/dsets.pth')
+dsets
+
+
(#60) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1))] ...]
+
+
+
+
dsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)
+torch.save(dsets.train, 'export/dsets.pth')
+del dsets
+dsets = torch.load('export/dsets.pth')
+dsets
+
+
(#30) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1))] ...]
+
+
+
+
dsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)
+test_eq(len(dsets.train), len(X_train))
+dsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)
+test_eq(len(dsets.train), len(X_train))
+dsets = TSDatasets(X_on_disk, y_array, tfms=[add(1), TSCategorize()], splits=splits)
+test_eq(len(dsets.train), len(X_train))
+# test_eq(dsets.train[0][0].data, tensor(X_train[0] + 1))
+test_eq(dsets.train[0][1].item(), y_tensor[0])
+
+
+
dsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSCategorize()], splits=splits)
+test_eq(len(dsets.add_test(X_train, y_train)), len(X_train))
+test_eq(len(dsets.add_unlabeled(X_train)), len(X_train))
+
+
+
X_tensor = torch.randn(100, 4, 50)
+y_tensor = torch.randint(0, 2, size=(len(X_tensor),))
+tensor_splits = (np.arange(80), np.arange(80, 100))
+dsets = TSDatasets(X_tensor, y_tensor, tfms=[None, TSClassification()], splits=tensor_splits)
+test_eq(type(dsets[0][0]), TSTensor)
+
+
+

source

+
+
+

TSDataLoader

+
+
 TSDataLoader (dataset, bs=64, shuffle=False, drop_last=False,
+               num_workers=0, verbose=False, do_setup=True, vocab=None,
+               sort=False, weights=None, partial_n=None, sampler=None,
+               pin_memory=False, timeout=0, batch_size=None, indexed=None,
+               n=None, device=None, persistent_workers=False,
+               pin_memory_device='', wif=None, before_iter=None,
+               after_item=None, before_batch=None, after_batch=None,
+               after_iter=None, create_batches=None, create_item=None,
+               create_batch=None, retain=None, get_idxs=None, sample=None,
+               shuffle_fn=None, do_batch=None)
+
+

Transformed DataLoader

                     Type      Default   Details
dataset                                  Map- or iterable-style dataset from which to load the data
bs                   int       64        Size of batch
shuffle              bool      False     Whether to shuffle data
drop_last            bool      False
num_workers          int       None      Number of CPU cores to use in parallel (default: all available up to 16)
verbose              bool      False     Whether to print verbose logs
do_setup             bool      True      Whether to run setup() for batch transform(s)
vocab                NoneType  None
sort                 bool      False
weights              NoneType  None
partial_n            NoneType  None
sampler              NoneType  None
pin_memory           bool      False
timeout              int       0
batch_size           NoneType  None
indexed              NoneType  None
n                    NoneType  None
device               NoneType  None
persistent_workers   bool      False
pin_memory_device    str       ''
wif                  NoneType  None
before_iter          NoneType  None
after_item           NoneType  None
before_batch         NoneType  None
after_batch          NoneType  None
after_iter           NoneType  None
create_batches       NoneType  None
create_item          NoneType  None
create_batch         NoneType  None
retain               NoneType  None
get_idxs             NoneType  None
sample               NoneType  None
shuffle_fn           NoneType  None
do_batch             NoneType  None
+
+
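Most examples on this page create dataloaders through get_ts_dls or TSDataLoaders.from_dsets, but a TSDataLoader can also be built directly from a TSDatasets split. The following is only a hedged sketch based on the signature above (argument choices are assumptions):

# Illustrative sketch: one loader per split, built directly from the datasets.
dsets    = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)
train_dl = TSDataLoader(dsets.train, bs=64, shuffle=True, drop_last=True)   # training loader
valid_dl = TSDataLoader(dsets.valid, bs=128, shuffle=False)                 # validation loader
xb, yb   = train_dl.one_batch()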

source

+
+
+

NumpyDataLoader

+
+
 NumpyDataLoader (dataset, bs=64, shuffle=False, drop_last=False,
+                  num_workers=0, verbose=False, do_setup=True, vocab=None,
+                  sort=False, weights=None, partial_n=None, sampler=None,
+                  pin_memory=False, timeout=0, batch_size=None,
+                  indexed=None, n=None, device=None,
+                  persistent_workers=False, pin_memory_device='',
+                  wif=None, before_iter=None, after_item=None,
+                  before_batch=None, after_batch=None, after_iter=None,
+                  create_batches=None, create_item=None,
+                  create_batch=None, retain=None, get_idxs=None,
+                  sample=None, shuffle_fn=None, do_batch=None)
+
+

Transformed DataLoader

                     Type      Default   Details
dataset                                  Map- or iterable-style dataset from which to load the data
bs                   int       64        Size of batch
shuffle              bool      False     Whether to shuffle data
drop_last            bool      False
num_workers          int       None      Number of CPU cores to use in parallel (default: all available up to 16)
verbose              bool      False     Whether to print verbose logs
do_setup             bool      True      Whether to run setup() for batch transform(s)
vocab                NoneType  None
sort                 bool      False
weights              NoneType  None
partial_n            NoneType  None
sampler              NoneType  None
pin_memory           bool      False
timeout              int       0
batch_size           NoneType  None
indexed              NoneType  None
n                    NoneType  None
device               NoneType  None
persistent_workers   bool      False
pin_memory_device    str       ''
wif                  NoneType  None
before_iter          NoneType  None
after_item           NoneType  None
before_batch         NoneType  None
after_batch          NoneType  None
after_iter           NoneType  None
create_batches       NoneType  None
create_item          NoneType  None
create_batch         NoneType  None
retain               NoneType  None
get_idxs             NoneType  None
sample               NoneType  None
shuffle_fn           NoneType  None
do_batch             NoneType  None
+
+

source

+
+
+

TSDataLoaders

+
+
 TSDataLoaders (*loaders, path='.', device=None)
+
+

Basic wrapper around several DataLoaders.

+
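A minimal usage sketch (the same pattern appears in several cells further below): wrap the train and valid datasets with TSDataLoaders.from_dsets; the batch sizes shown are arbitrary.

dsets = TSDatasets(X_on_disk, y_array, tfms=[None, TSCategorize()], splits=splits)
dls   = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[64, 128], num_workers=0)
xb, yb = dls.train.one_batch()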
+

source

+
+
+

NumpyDataLoaders

+
+
 NumpyDataLoaders (*loaders, path='.', device=None)
+
+

Basic wrapper around several DataLoaders.

+
+

source

+
+
+

StratifiedSampler

+
+
 StratifiedSampler (y, bs:int=64, shuffle:bool=False,
+                    drop_last:bool=False)
+
+

Sampler where batches preserve the percentage of samples for each class

            Type  Default  Details
y                          The target variable for supervised learning problems. Stratification is done based on the y labels.
bs          int   64       Batch size
shuffle     bool  False    Flag to shuffle each class’s samples before splitting into batches.
drop_last   bool  False    Flag to drop the last incomplete batch.
+
+
a = np.concatenate([np.zeros(90), np.ones(10)])
+sampler = StratifiedSampler(a, bs=32, shuffle=True, drop_last=True)
+idxs = np.array(list(iter(sampler)))
+print(idxs[:32])
+print(a[idxs][:32])
+test_eq(a[idxs][:32].mean(), .1)
+
+
[[ 0  2  8 17 18 21 27 29 34 38 39 43 45 48 52 54 55 60 61 63 66 67 68 69
+  71 73 78 80 81 84 90 92 95 99  1  6 11 12 15 16 20 23 24 28 30 33 36 37
+  40 41 42 44 49 59 62 64 65 74 75 76 77 79 86 87 91 93 96  3  4  5  7  9
+  10 13 14 19 22 25 26 31 32 35 46 47 50 51 53 56 57 58 70 72 82 83 85 88
+  89 94 97 98]]
+[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+  0. 0. 0. 0. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+  0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 0. 0. 0. 0. 0.
+  0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+  0. 1. 1. 1.]]
+
+
+
+

source

+
+
+

get_c

+
+
 get_c (dls)
+
+
+

source

+
+
+

get_best_dls_params

+
+
 get_best_dls_params (dls, n_iters=10, num_workers=[0, 1, 2, 4, 8],
+                      pin_memory=[True, False], prefetch_factor=[2, 4, 8],
+                      return_best=True, verbose=True)
+
+
+

source

+
+
+

get_best_dl_params

+
+
 get_best_dl_params (dl, n_iters=10, num_workers=[0, 1, 2, 4, 8],
+                     pin_memory=[True, False], prefetch_factor=[2, 4, 8],
+                     return_best=True, verbose=True)
+
+
+

source

+
+
+

get_ts_dls

+
+
 get_ts_dls (X, y=None, splits=None, sel_vars=None, sel_steps=None,
+             tfms=None, inplace=True, path='.', bs=64, batch_tfms=None,
+             num_workers=0, device=None, shuffle_train=True,
+             drop_last=True, weights=None, partial_n=None, sampler=None,
+             sort=False, **kwargs)
+
+
+
# Tests
+a = np.arange(10)
+
+for s in [None, np.arange(10), np.arange(10).tolist(), L(np.arange(10).tolist()), (np.arange(10).tolist(), None), (np.arange(10).tolist(), L())]:
+    test_eq(_check_splits(a, s), (L(np.arange(10).tolist()), L()))
+
+
+

source

+
+
+

get_subset_dl

+
+
 get_subset_dl (dl, idxs)
+
+
+

source

+
+
+

get_ts_dl

+
+
 get_ts_dl (X, y=None, split=None, sel_vars=None, sel_steps=None,
+            tfms=None, inplace=True, path='.', bs=64, batch_tfms=None,
+            num_workers=0, device=None, shuffle_train=True,
+            drop_last=True, weights=None, partial_n=None, sampler=None,
+            sort=False, **kwargs)
+
+
+
X, y, splits = get_UCR_data(dsid, on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=8)
+dls = get_best_dls_params(dls, prefetch_factor=[2, 4, 8, 16])
+
+

+Dataloader 0
+
+   num_workers:  0  pin_memory: True   prefetch_factor:  2  -  time:    1.400 ms/iter
+   num_workers:  0  pin_memory: False  prefetch_factor:  2  -  time:    0.620 ms/iter
+
+   best dl params:
+       best num_workers    : 0
+       best pin_memory     : False
+       best prefetch_factor: 2
+       return_best         : True
+
+
+
+Dataloader 1
+
+   num_workers:  0  pin_memory: True   prefetch_factor:  2  -  time:    0.261 ms/iter
+   num_workers:  0  pin_memory: False  prefetch_factor:  2  -  time:    0.306 ms/iter
+
+   best dl params:
+       best num_workers    : 0
+       best pin_memory     : True
+       best prefetch_factor: 2
+       return_best         : True
+
+
+
+
+
+
y_int = np.random.randint(0, 4, size=len(X))
+dls = get_ts_dls(X, y_int, splits=splits, bs=8)
+test_eq(hasattr(dls, "vocab"), False)
+
+dls = get_ts_dls(X, y_int, splits=splits, bs=8, vocab=[0,1,2,3])
+test_eq(dls.vocab, [0,1,2,3])
+test_eq(dls.c, 4)
+test_eq(dls.cat, True)
+
+
+
X, y, splits = get_UCR_data(dsid, on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=8)
+b=first(dls.train)
+dls.decode(b)
+test_eq(X.shape[1], dls.vars)
+test_eq(X.shape[-1], dls.len)
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=64, inplace=True)
+
+idxs = random_choice(len(dls.valid_ds), 10, False)
+new_dl = get_subset_dl(dls.train, idxs)
+
+idxs = random_choice(len(dls.valid_ds), 10, False)
+new_dl = get_subset_dl(dls.valid, idxs)
+test_eq(new_dl.one_batch()[0].cpu().numpy(), X[splits[1][idxs]])
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+weights = np.random.rand(len(X))
+dls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=64, inplace=True, weights=weights)
+weights2 = weights[splits[0]] / weights[splits[0]].sum()
+test_eq(dls.train.weights, weights2)
+test_eq(dls.valid.weights, None)
+
+
+
partial_n = 12
+X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()], bs=64, inplace=True, partial_n=partial_n)
+test_eq(len(dls.train.one_batch()[0]), partial_n)
+
+partial_n = .1
+X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSClassification()], bs=64, inplace=True, partial_n=partial_n)
+test_eq(len(dls.train.one_batch()[0]), int(round(len(dls.train.dataset) * partial_n)))
+
+

You’ll now be able to pass a sampler to a tsai dataloader.

+

You should use one sampler for the train set and one for the validation set. Each sampler must be built from an object with the same length as its corresponding dataset — for example, the splits, as in the case below.

+

⚠️ Remember to set shuffle=False when using a sampler, since the two are mutually exclusive: whenever you use a sampler, the dataloader’s shuffle must be set to False. The sampler then controls whether the indices are shuffled (you can set shuffle to True or False in the sampler itself).

+

drop_last, however, is still managed by the dataloader.

+
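For instance, the StratifiedSampler shown earlier can be used this way (a hedged sketch: building one sampler per split from the corresponding labels is an assumption; the cells below use standard PyTorch samplers):

X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
train_sampler = StratifiedSampler(y[splits[0]], bs=8, shuffle=True, drop_last=True)  # shuffling handled by the sampler
valid_sampler = StratifiedSampler(y[splits[1]], bs=8, shuffle=False)
dls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()], bs=8,
                 shuffle=False, drop_last=True, sampler=[train_sampler, valid_sampler])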
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+train_sampler = torch.utils.data.sampler.RandomSampler(splits[0])
+valid_sampler = torch.utils.data.sampler.SequentialSampler(splits[1])
+dls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()], bs=8, inplace=True,
+                 shuffle=False, drop_last=True, sampler=[train_sampler, valid_sampler])
+print('train')
+for _ in dls.train:
+    print(dls.train.idxs)
+print('valid')
+for _ in dls.valid:
+    print(dls.valid.idxs)
+
+
train
+[22, 25, 16, 3, 26, 28, 7, 18]
+[5, 4, 12, 27, 29, 24, 9, 11]
+[0, 2, 8, 17, 21, 20, 23, 10]
+valid
+[0, 1, 2, 3, 4, 5, 6, 7]
+[8, 9, 10, 11, 12, 13, 14, 15]
+[16, 17, 18, 19, 20, 21, 22, 23]
+[24, 25, 26, 27, 28, 29]
+
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+train_sampler = torch.utils.data.sampler.SequentialSampler(splits[0])
+valid_sampler = torch.utils.data.sampler.SequentialSampler(splits[1])
+dls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()], bs=64, inplace=True,
+                 shuffle=False, sampler=[train_sampler, valid_sampler])
+test_eq(dls.get_idxs(), np.arange(len(splits[0])))
+test_eq(dls.train.get_idxs(), np.arange(len(splits[0])))
+test_eq(dls.valid.get_idxs(), np.arange(len(splits[1])))
+xb = dls.valid.one_batch()[0].cpu().numpy()
+test_close(xb, X[dls.valid.split_idxs])
+
+X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+train_sampler = torch.utils.data.sampler.RandomSampler(splits[0])
+valid_sampler = torch.utils.data.sampler.SequentialSampler(splits[0])
+dls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()], bs=32, inplace=True,
+                 shuffle=False, drop_last=True, sampler=[train_sampler, valid_sampler])
+test_ne(dls.train.get_idxs(), np.arange(len(splits[0])))
+test_eq(np.sort(dls.train.get_idxs()), np.arange(len(splits[0])))
+test_eq(dls.valid.get_idxs(), np.arange(len(splits[1])))
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=64, inplace=False)
+
+idxs = random_choice(len(dls.valid_ds), 10, False)
+new_dl = get_subset_dl(dls.train, idxs)
+
+idxs = random_choice(len(dls.valid_ds), 10, False)
+new_dl = get_subset_dl(dls.valid, idxs)
+test_eq(new_dl.one_batch()[0].cpu().numpy(), X[splits[1][idxs]])
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=8)
+b = dls.one_batch()
+input_idxs = dls.input_idxs
+test_eq(b[0].cpu().numpy(), X[input_idxs])
+b = dls.train.one_batch()
+input_idxs = dls.train.input_idxs
+test_eq(b[0].cpu().numpy(), X[input_idxs])
+assert max(input_idxs) < len(splits[0])
+b = dls.valid.one_batch()
+input_idxs = dls.valid.input_idxs
+test_eq(b[0].cpu().numpy(), X[input_idxs])
+assert min(input_idxs) >= len(splits[0])
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, bs=8)
+b=first(dls.train)
+dls.decode(b)
+test_eq(X.shape[1], dls.vars)
+test_eq(X.shape[-1], dls.len)
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, bs=8, weights=np.random.randint(0, 3, len(y)))
+b=first(dls.train)
+dls.decode(b)
+test_eq(X.shape[1], dls.vars)
+test_eq(X.shape[-1], dls.len)
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dsets = TSDatasets(X, y, tfms=[None, TSCategorize()], splits=splits)
+ts_dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, device=default_device(), bs=4)
+torch.save(ts_dls, 'export/ts_dls.pth')
+del ts_dls
+ts_dls = torch.load('export/ts_dls.pth')
+for xb,yb in ts_dls.train:
+    test_eq(tensor(X[ts_dls.train.idxs]), xb.cpu())
+
+
+
X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, bs=4)
+for xb,yb in dls.train:
+    test_eq(xb.cpu().numpy(), X[dls.train.input_idxs])
+for xb,yb in dls.valid:
+    test_eq(xb.cpu().numpy(), X[dls.valid.input_idxs])
+
+
+
test_eq((ts_dls.train.shuffle, ts_dls.valid.shuffle, ts_dls.train.drop_last, ts_dls.valid.drop_last), (True, False, True, False))
+
+
+
dsid = 'OliveOil'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, bs=8, num_workers=0)
+xb, yb = first(dls.train)
+test_eq(tensor(X[dls.train.idxs]), xb.cpu())
+
+
+
test_eq((dls.train.shuffle, dls.valid.shuffle, dls.train.drop_last, dls.valid.drop_last), (True, False, True, False))
+
+
+
# multiclass
+dsid = 'OliveOil'
+X, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)
+dls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, inplace=True)
+dls.show_dist()
+dls.train.show_dist()
+xb,yb = first(dls.train)
+test_eq((dls.cat, dls.c), (True, 4))
+test_ne(dls.cws.cpu().numpy(), None)
+dls.decoder((xb, ))
+dls.decoder((xb[0], ))
+dls.decoder((xb, yb))
+dls.decoder((xb[0], yb[0]))
+dls.decoder(yb)
+dls.decoder(yb[0])
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
'1'
+
+
+
+
new_dl = dls.new_dl(X)
+first(new_dl)
+
+
(TSTensor(samples:60, vars:1, len:570, device=cpu, dtype=torch.float32),)
+
+
+
+
new_dl = dls.new_dl(X, y=y)
+first(new_dl)
+
+
(TSTensor(samples:60, vars:1, len:570, device=cpu, dtype=torch.float32),
+ TensorCategory([2, 3, 2, 2, 0, 1, 1, 3, 3, 1, 2, 0, 0, 3, 0, 1, 0, 3, 3, 3, 1,
+                 3, 3, 3, 3, 3, 0, 3, 1, 1, 3, 3, 2, 3, 3, 3, 1, 1, 3, 2, 3, 0,
+                 3, 0, 3, 1, 1, 2, 1, 1, 1, 3, 3, 1, 2, 1, 1, 3, 0, 0]))
+
+
+
+
dls.train.dataset.split_idxs, dls.train.dataset.splits, dls.valid.split_idxs
+
+
(array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
+        17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], dtype=int8),
+ (#30) [0,1,2,3,4,5,6,7,8,9...],
+ array([30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+        47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59], dtype=int8))
+
+
+
+
# 2d input array and tfms == None return a NoTfmLists object
+X, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)
+X = X[:, 0]
+tfms=[None, TSCategorize()]
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, bs=8)
+test_eq(1, dls.vars)
+test_eq(X.shape[-1], dls.len)
+test_eq(type(dls.tls[0]).__name__, 'NoTfmLists')
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, bs=8, inplace=False)
+test_eq(1, dls.vars)
+test_eq(X.shape[-1], dls.len)
+test_eq(type(dls.tls[0]).__name__, 'NoTfmLists')
+
+
+
# regression
+dsid = 'OliveOil'
+X, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)
+dls = get_ts_dls(X, np.random.rand(60, ), tfms=[None, ToNumpyTensor], splits=splits)
+dls.show_dist()
+dls.train.show_dist()
+xb,yb = first(dls.train)
+dls.decoder((xb, ))
+dls.decoder((xb[0], ))
+dls.decoder((xb, yb))
+dls.decoder((xb[0], yb[0]))
+dls.decoder(yb)
+dls.decoder(yb[0])
+test_eq((dls.cat, dls.c), (False, 1))
+test_eq(dls.cws, None)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
# regression, multilabel
+dsid = 'OliveOil'
+X, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)
+dls = get_ts_dls(X, np.random.rand(60, 3) * 5, tfms=[None, ToNumpyTensor], splits=splits)
+dls.show_dist()
+dls.train.show_dist()
+xb,yb = first(dls.train)
+dls.decoder((xb, ))
+dls.decoder((xb[0], ))
+dls.decoder((xb, yb))
+dls.decoder((xb[0], yb[0]))
+dls.decoder(yb)
+dls.decoder(yb[0])
+test_eq((dls.cat, dls.c, dls.d),(False, 1, 3))
+test_eq(dls.cws, None)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
# multiclass, multilabel
+dsid = 'OliveOil'
+X, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)
+cm = {
+    '1':'A',
+    '2':['B', 'C'],
+    '3':['B', 'D'] ,
+    '4':'E',
+    }
+keys = cm.keys()
+new_cm = {k:v for k,v in zip(keys, [listify(v) for v in cm.values()])}
+y_multi = np.array([new_cm[yi] if yi in keys else listify(yi) for yi in y], dtype=object)
+dls = get_ts_dls(X, y_multi, tfms=[None, TSMultiLabelClassification()], splits=splits)
+dls.show_dist()
+dls.train.show_dist()
+xb,yb = first(dls.train)
+dls.decoder((xb, ))
+dls.decoder((xb[0], ))
+dls.decoder((xb, yb))
+dls.decoder((xb[0], yb[0]))
+dls.decoder(yb)
+dls.decoder(yb[0])
+test_eq((dls.cat, dls.c), (True, 5))
+test_ne(dls.cws.cpu().numpy(), None)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
dsid = 'OliveOil'
+X, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)
+cm = {
+    '1':'A',
+    '2':['B', 'C'],
+    '3':['B', 'D'] ,
+    '4':'E',
+    }
+keys = cm.keys()
+new_cm = {k:v for k,v in zip(keys, [listify(v) for v in cm.values()])}
+y_multi = np.array([new_cm[yi] if yi in keys else listify(yi) for yi in y], dtype=object)
+dls = get_ts_dls(X, y_multi, tfms=[None, TSMultiLabelClassification()], splits=splits)
+test_eq(dls.new(X[0]).one_batch().shape, (1, 570))
+test_eq(dls.new(X[:15]).one_batch().shape, (15, 1, 570))
+test_eq(dls.train.new(X[0]).one_batch().shape, (1, 570))
+test_eq(dls.valid.new(X[:15]).one_batch().shape, (15, 1, 570))
+
+
+
bs = 25
+dsets = TSDatasets(X, y, tfms=[None, TSCategorize()], splits=splits)
+dls   = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs*2], batch_tfms=add(1), num_workers=0)
+xb,yb = dls.train.one_batch()
+test_eq(xb.cpu().data, tensor(X_on_disk[splits[0]][dls.train.idxs]) + 1)
+
+
+
dsets = TSDatasets(X, y, tfms=[None, TSCategorize()], splits=splits)
+dls   = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs*2])
+xb,yb = dls.train.one_batch()
+test_eq(xb.shape, (min(bs, len(splits[0])), X.shape[1], X.shape[-1]))
+it = iter(dls.valid)
+for xb,yb in it:
+    test_close(xb.cpu(), TSTensor(X[splits[1]][dls.valid.idxs]))
+
+
+
bs = 64
+dsets = TSDatasets(X, y, tfms=[add(1), TSCategorize()], splits=RandomSplitter(valid_pct=.3)(y_array))
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs*2])
+xb,yb = dls.train.one_batch()
+test_eq(xb.shape, (min(bs, len(dsets.train)), X_on_disk.shape[1], X_on_disk.shape[-1]))
+xb,yb = dls.valid.one_batch()
+test_eq(xb.shape, (min(bs*2, len(dsets.valid)), X_on_disk.shape[1], X_on_disk.shape[-1]))
+
+
+
dsets = TSDatasets(X_on_disk, y_array, tfms=[None, TSCategorize()], splits=splits)
+dls   = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[32, 64])
+for i in range(10):
+    dl = dls.train if random.random() < .5 else dls.valid
+    xb,yb = dl.one_batch()
+    torch.equal(xb.cpu(), TSTensor(X_on_disk[dl.input_idxs]))
+
+dsets = TSDatasets(X_on_disk, y_array, tfms=[None, TSCategorize()])
+dls   = TSDataLoaders.from_dsets(dsets, bs=32)
+for i in range(10):
+    xb,yb = dls.one_batch()
+    torch.equal(xb.cpu(), TSTensor(X_on_disk[dls.input_idxs]))
+
+dsets = TSDatasets(X_on_disk, tfms=None)
+dls   = TSDataLoaders.from_dsets(dsets, bs=32)
+for i in range(10):
+    xb = dls.one_batch()
+    torch.equal(xb[0].cpu(), TSTensor(X_on_disk[dls.input_idxs]))
+
+
+
dsets = TSDatasets(X_on_disk, y_array, tfms=[None, TSCategorize()])
+dls   = TSDataLoaders.from_dsets(dsets, bs=32)
+test_eq(dls.split_idxs, L(np.arange(len(X_on_disk)).tolist()))
+
+
+
X, y, splits = get_UCR_data('NATOPS', return_split=False)
+tfms  = [None, [TSCategorize()]]
+dls = get_ts_dls(X, y, tfms=tfms, splits=splits, bs=[64, 128])
+dls.show_batch()
+dls.show_dist()
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
# test passing a list with categories instead of a numpy array
+dsid = 'NATOPS'
+bs = 64
+X2, y2, splits2 = get_UCR_data(dsid, return_split=False)
+vocab = sorted(set(y))
+tfms = [None, [TSCategorize(vocab=vocab)]]
+dsets = TSDatasets(X2, y2, tfms=tfms, splits=splits2)
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs*2])
+dls.train.one_batch()
+
+
(TSTensor(samples:64, vars:24, len:51, device=cpu, dtype=torch.float32),
+ TensorCategory([0, 3, 0, 5, 0, 0, 5, 3, 3, 1, 2, 0, 0, 2, 5, 2, 2, 4, 5, 3, 2,
+                 4, 2, 1, 1, 0, 1, 2, 0, 4, 4, 4, 4, 2, 0, 0, 3, 3, 0, 5, 4, 3,
+                 2, 5, 5, 2, 2, 4, 3, 0, 2, 4, 4, 5, 5, 0, 5, 3, 2, 1, 0, 3, 4,
+                 2]))
+
+
+
+
# MultiCategory
+bs = 64
+n_epochs = 100
+tfms = [None, [MultiCategorize()]]
+dsets = TSDatasets(X2, y2, tfms=tfms, splits=splits2)
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=bs)
+dls.train.one_batch()
+
+
(TSTensor(samples:64, vars:24, len:51, device=cpu, dtype=torch.float32),
+ TensorMultiCategory([[7, 0, 1],
+                      [4, 0, 1],
+                      [7, 0, 1],
+                      [5, 0, 1],
+                      [2, 0, 1],
+                      [2, 0, 1],
+                      [2, 0, 1],
+                      [7, 0, 1],
+                      [5, 0, 1],
+                      [3, 0, 1],
+                      [6, 0, 1],
+                      [7, 0, 1],
+                      [3, 0, 1],
+                      [6, 0, 1],
+                      [7, 0, 1],
+                      [7, 0, 1],
+                      [6, 0, 1],
+                      [7, 0, 1],
+                      [5, 0, 1],
+                      [3, 0, 1],
+                      [3, 0, 1],
+                      [7, 0, 1],
+                      [7, 0, 1],
+                      [2, 0, 1],
+                      [4, 0, 1],
+                      [4, 0, 1],
+                      [2, 0, 1],
+                      [4, 0, 1],
+                      [6, 0, 1],
+                      [2, 0, 1],
+                      [2, 0, 1],
+                      [5, 0, 1],
+                      [2, 0, 1],
+                      [5, 0, 1],
+                      [4, 0, 1],
+                      [7, 0, 1],
+                      [2, 0, 1],
+                      [3, 0, 1],
+                      [4, 0, 1],
+                      [6, 0, 1],
+                      [2, 0, 1],
+                      [7, 0, 1],
+                      [2, 0, 1],
+                      [3, 0, 1],
+                      [4, 0, 1],
+                      [5, 0, 1],
+                      [5, 0, 1],
+                      [2, 0, 1],
+                      [5, 0, 1],
+                      [2, 0, 1],
+                      [3, 0, 1],
+                      [5, 0, 1],
+                      [6, 0, 1],
+                      [7, 0, 1],
+                      [5, 0, 1],
+                      [2, 0, 1],
+                      [7, 0, 1],
+                      [4, 0, 1],
+                      [5, 0, 1],
+                      [6, 0, 1],
+                      [7, 0, 1],
+                      [4, 0, 1],
+                      [7, 0, 1],
+                      [3, 0, 1]]))
+
+
+

The combination of splits, sel_vars and sel_steps is very powerful, as it allows you to perform advanced indexing of the array-like X.

+
+
from tsai.data.validation import TSSplitter
+
+
+
X = np.arange(16*5*50).reshape(16,5,50)
+y = alphabet[np.random.randint(0,3, 16)]
+splits = TSSplitter(show_plot=False)(y)
+tfms = [None, TSCategorize()]
+batch_tfms = None
+dls = get_ts_dls(X, y, splits=splits, sel_vars=[0, 1, 3], sel_steps=slice(-10, None), tfms=tfms, batch_tfms=batch_tfms)
+xb,yb=dls.train.one_batch()
+test_close(X[dls.input_idxs][:, [0, 1, 3]][...,slice(-10, None)], xb.cpu().numpy())
+new_dl = dls.train.new_dl(X[:5], y[:5])
+print(new_dl.one_batch())
+new_empty_dl = dls.new_empty() # when exported
+dl = new_empty_dl.new_dl(X[:10], y[:10], bs=64) # after export
+dl.one_batch()
+
+
(TSTensor(samples:5, vars:3, len:10, device=cpu, dtype=torch.int64), TensorCategory([2, 2, 2, 2, 2]))
+
+
+
(TSTensor(samples:10, vars:3, len:10, device=cpu, dtype=torch.int64),
+ TensorCategory([2, 2, 2, 0, 2, 2, 0, 2, 1, 1]))
+
+
+
+

source

+
+
+

get_dl_percent_per_epoch

+
+
 get_dl_percent_per_epoch (dl, model, n_batches=None)
+
+
+

source

+
+
+

get_time_per_batch

+
+
 get_time_per_batch (dl, model=None, n_batches=None)
+
+
+
X, y, splits = get_UCR_data('NATOPS', split_data=False)
+tfms  = [None, [TSCategorize()]]
+dls = get_ts_dls(X, y, tfms=tfms, splits=splits)
+train_dl = dls.train
+xb, _ = train_dl.one_batch()
+model = nn.Linear(xb.shape[-1], 2).to(xb.device)
+t = get_dl_percent_per_epoch(train_dl, model, n_batches=10)
+print(t)
+
93.70%
+
+
\ No newline at end of file diff --git a/data.external.html b/data.external.html new file mode 100644 index 000000000..e72aa3b6b --- /dev/null +++ b/data.external.html @@ -0,0 +1,2178 @@ +tsai - External data
+
+
+

External data

+
+

Helper functions used to download and extract common time series datasets.

+
+
+

source

+
+

decompress_from_url

+
+
 decompress_from_url (url, target_dir=None, verbose=False)
+
+
+

source

+
+
+

download_data

+
+
 download_data (url, fname=None, c_key='archive', force_download=False,
+                timeout=4, verbose=False)
+
+

Download url to fname.

+
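A minimal, hedged usage sketch based on the signature above (the URL is a placeholder, and returning the local file path is an assumption):

fpath = download_data('https://example.com/some_dataset.zip', force_download=False, verbose=True)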
+

source

+
+
+

get_UCR_univariate_list

+
+
 get_UCR_univariate_list ()
+
+
+

source

+
+
+

get_UCR_multivariate_list

+
+
 get_UCR_multivariate_list ()
+
+
+

source

+
+
+

get_UCR_data

+
+
 get_UCR_data (dsid, path='.', parent_dir='data/UCR', on_disk=True,
+               mode='c', Xdtype='float32', ydtype=None, return_split=True,
+               split_data=True, force_download=False, verbose=False)
+
+
+
from fastai.data.transforms import get_files
+
+
+
PATH = Path('.')
+dsids = ['ECGFiveDays', 'AtrialFibrillation'] # univariate and multivariate
+for dsid in dsids:
+    print(dsid)
+    tgt_dir = PATH/f'data/UCR/{dsid}'
+    if os.path.isdir(tgt_dir): shutil.rmtree(tgt_dir)
+    test_eq(len(get_files(tgt_dir)), 0) # no file left
+    X_train, y_train, X_valid, y_valid = get_UCR_data(dsid)
+    test_eq(len(get_files(tgt_dir, '.npy')), 6)
+    test_eq(len(get_files(tgt_dir, '.npy')), len(get_files(tgt_dir))) # test no left file/ dir
+    del X_train, y_train, X_valid, y_valid
+    X_train, y_train, X_valid, y_valid = get_UCR_data(dsid)
+    test_eq(X_train.ndim, 3)
+    test_eq(y_train.ndim, 1)
+    test_eq(X_valid.ndim, 3)
+    test_eq(y_valid.ndim, 1)
+    test_eq(len(get_files(tgt_dir, '.npy')), 6)
+    test_eq(len(get_files(tgt_dir, '.npy')), len(get_files(tgt_dir))) # test no left file/ dir
+    test_eq(X_train.ndim, 3)
+    test_eq(y_train.ndim, 1)
+    test_eq(X_valid.ndim, 3)
+    test_eq(y_valid.ndim, 1)
+    test_eq(X_train.dtype, np.float32)
+    test_eq(X_train.__class__.__name__, 'memmap')
+    del X_train, y_train, X_valid, y_valid
+    X_train, y_train, X_valid, y_valid = get_UCR_data(dsid, on_disk=False)
+    test_eq(X_train.__class__.__name__, 'ndarray')
+    del X_train, y_train, X_valid, y_valid
+
+
ECGFiveDays
+AtrialFibrillation
+
+
+
+
X_train, y_train, X_valid, y_valid = get_UCR_data('natops')
+
+
+
dsid = 'natops' 
+X_train, y_train, X_valid, y_valid = get_UCR_data(dsid, verbose=True)
+X, y, splits = get_UCR_data(dsid, split_data=False)
+test_eq(X[splits[0]], X_train)
+test_eq(y[splits[1]], y_valid)
+test_eq(X[splits[0]], X_train)
+test_eq(y[splits[1]], y_valid)
+test_type(X, X_train)
+test_type(y, y_train)
+
+
Dataset: NATOPS
+X_train: (180, 24, 51)
+y_train: (180,)
+X_valid: (180, 24, 51)
+y_valid: (180,) 
+
+
+
+
+

source

+
+
+

check_data

+
+
 check_data (X, y=None, splits=None, show_plot=True)
+
+
+
dsid = 'ECGFiveDays'
+X, y, splits = get_UCR_data(dsid, split_data=False, on_disk=False, force_download=False)
+check_data(X, y, splits)
+check_data(X[:, 0], y, splits)
+y = y.astype(np.float32)
+check_data(X, y, splits)
+y[:10] = np.nan
+check_data(X[:, 0], y, splits)
+X, y, splits = get_UCR_data(dsid, split_data=False, on_disk=False, force_download=False)
+splits = get_splits(y, 3)
+check_data(X, y, splits)
+check_data(X[:, 0], y, splits)
+y[:5]= np.nan
+check_data(X[:, 0], y, splits)
+X, y, splits = get_UCR_data(dsid, split_data=False, on_disk=False, force_download=False)
+
+
X      - shape: [884 samples x 1 features x 136 timesteps]  type: ndarray  dtype:float32  isnan: 0
+y      - shape: (884,)  type: ndarray  dtype:<U1  n_classes: 2 (442 samples per class) ['1', '2']  isnan: False
+splits - n_splits: 2 shape: [23, 861]  overlap: False
+X      - shape: (884, 136)  type: ndarray  dtype:float32  isnan: 0
+y      - shape: (884,)  type: ndarray  dtype:<U1  n_classes: 2 (442 samples per class) ['1', '2']  isnan: False
+splits - n_splits: 2 shape: [23, 861]  overlap: False
+X      - shape: [884 samples x 1 features x 136 timesteps]  type: ndarray  dtype:float32  isnan: 0
+y      - shape: (884,)  type: ndarray  dtype:float32  isnan: 0
+splits - n_splits: 2 shape: [23, 861]  overlap: False
+X      - shape: (884, 136)  type: ndarray  dtype:float32  isnan: 0
+y      - shape: (884,)  type: ndarray  dtype:float32  isnan: 10
+splits - n_splits: 2 shape: [23, 861]  overlap: False
+X      - shape: [884 samples x 1 features x 136 timesteps]  type: ndarray  dtype:float32  isnan: 0
+y      - shape: (884,)  type: ndarray  dtype:<U1  n_classes: 2 (442 samples per class) ['1', '2']  isnan: False
+splits - n_splits: 3 shape: [[589, 295], [589, 295], [590, 294]]  overlap: [False, False, False]
+X      - shape: (884, 136)  type: ndarray  dtype:float32  isnan: 0
+y      - shape: (884,)  type: ndarray  dtype:<U1  n_classes: 2 (442 samples per class) ['1', '2']  isnan: False
+splits - n_splits: 3 shape: [[589, 295], [589, 295], [590, 294]]  overlap: [False, False, False]
+X      - shape: (884, 136)  type: ndarray  dtype:float32  isnan: 0
+y      - shape: (884,)  type: ndarray  dtype:<U1  n_classes: 3 (294 samples per class) ['1', '2', 'n']  isnan: False
+splits - n_splits: 3 shape: [[589, 295], [589, 295], [590, 294]]  overlap: [False, False, False]
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
/var/folders/42/4hhwknbd5kzcbq48tmy_gbp00000gn/T/ipykernel_70492/278801922.py:23: UserWarning: y contains nan values
+  warnings.warn('y contains nan values')
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+

source

+
+
+

get_Monash_regression_list

+
+
 get_Monash_regression_list ()
+
+
+

source

+
+
+

get_Monash_regression_data

+
+
 get_Monash_regression_data (dsid, path='./data/Monash', on_disk=True,
+                             mode='c', Xdtype='float32', ydtype=None,
+                             split_data=True, force_download=False,
+                             verbose=False, timeout=4)
+
+
+
dsid = "Covid3Month"
+X_train, y_train, X_valid, y_valid = get_Monash_regression_data(dsid, on_disk=False, split_data=True, force_download=False)
+X, y, splits = get_Monash_regression_data(dsid, on_disk=True, split_data=False, force_download=False, verbose=True)
+if X_train is not None: 
+    test_eq(X_train.shape, (140, 1, 84))
+if X is not None: 
+    test_eq(X.shape, (201, 1, 84))
+
+
Dataset: Covid3Month
+X      : (201, 1, 84)
+y      : (201,)
+splits : (#140) [0,1,2,3,4,5,6,7,8,9...] (#61) [140,141,142,143,144,145,146,147,148,149...] 
+
+
+
+
+

source

+
+
+

get_forecasting_list

+
+
 get_forecasting_list ()
+
+
+

source

+
+
+

get_forecasting_time_series

+
+
 get_forecasting_time_series (dsid, path='./data/forecasting/',
+                              force_download=False, verbose=True,
+                              **kwargs)
+
+
+
ts = get_forecasting_time_series("sunspots", force_download=False)
+test_eq(len(ts), 2820)
+ts
+
+
Dataset: Sunspots
+downloading data...
+...done. Path = data/forecasting/Sunspots.csv
+
+
+
Month         Sunspots
1749-01-31    58.0
1749-02-28    62.6
1749-03-31    70.0
1749-04-30    55.7
1749-05-31    85.0
...           ...
1983-08-31    71.8
1983-09-30    50.3
1983-10-31    55.8
1983-11-30    33.3
1983-12-31    33.4

2820 rows × 1 columns

+
+
+
+
+
ts = get_forecasting_time_series("weather", force_download=False)
+if ts is not None: 
+    test_eq(len(ts), 70091)
+    display(ts)
+
+
Dataset: Weather
+downloading data...
+...done. Path = data/forecasting/Weather.csv.zip
+
+
+
Preview of the Weather dataframe (first and last 5 rows) omitted; its 19 columns are:
p (mbar), T (degC), Tpot (K), Tdew (degC), rh (%), VPmax (mbar), VPact (mbar), VPdef (mbar),
sh (g/kg), H2OC (mmol/mol), rho (g/m**3), Wx, Wy, max Wx, max Wy, Day sin, Day cos, Year sin, Year cos

70091 rows × 19 columns

+
+
+
+
+

source

+
+
+

convert_tsf_to_dataframe

+
+
 convert_tsf_to_dataframe (full_file_path_and_name,
+                           replace_missing_vals_with='NaN',
+                           value_column_name='series_value')
+
+
+
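A minimal usage sketch, assuming the function follows the Monash reference implementation and returns the dataframe together with the dataset metadata (the file path is a placeholder):

df, frequency, forecast_horizon, contain_missing_values, contain_equal_length = convert_tsf_to_dataframe(
    'data/forecasting/covid_deaths_dataset.tsf', replace_missing_vals_with='NaN', value_column_name='series_value')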

source

+
+
+

get_Monash_forecasting_data

+
+
 get_Monash_forecasting_data (dsid, path='./data/forecasting/',
+                              force_download=False,
+                              remove_from_disk=False, add_timestamp=True,
+                              verbose=True)
+
+
+

source

+
+
+

get_fcst_horizon

+
+
 get_fcst_horizon (frequency, dsid)
+
+
+

source

+
+
+

preprocess_Monash_df

+
+
 preprocess_Monash_df (df, frequency)
+
+
+
dsid = 'covid_deaths_dataset'
+df = get_Monash_forecasting_data(dsid, force_download=False)
+if df is not None: 
+    test_eq(df.shape, (56392, 3))
+
+
Dataset: covid_deaths_dataset
+downloading data...
+...data downloaded
+decompressing data...
+...data decompressed
+converting data to dataframe...
+...done
+
+freq                   : daily
+forecast_horizon       : 30
+contain_missing_values : False
+contain_equal_length   : True
+
+exploding dataframe...
+...done
+
+
+data.shape: (56392, 3)
+
+
+
+

source

+
+
+

download_all_long_term_forecasting_data

+
+
 download_all_long_term_forecasting_data
+                                          (target_dir='./data/long_forecas
+                                          ting/', force_download=False,
+                                          remove_zip=False,
+                                          c_key='archive', timeout=4,
+                                          verbose=True)
+
+
+

source

+
+
+

unzip_file

+
+
 unzip_file (file, target_dir)
+
+
+

source

+
+
+

get_long_term_forecasting_data

+
+
 get_long_term_forecasting_data (dsid,
+                                 target_dir='./data/long_forecasting/',
+                                 task='M', fcst_horizon=None,
+                                 fcst_history=None, preprocess=True,
+                                 force_download=False, remove_zip=False,
+                                 return_df=True, show_plot=True,
+                                 dtype=<class 'numpy.float32'>,
+                                 verbose=True, **kwargs)
+
+

Downloads (and preprocesses) a pandas dataframe with the requested long-term forecasting dataset

                 Type      Default                    Details
dsid                                                  ID of the dataset to be used for long-term forecasting.
target_dir       str       ./data/long_forecasting/   Directory where the long-term forecasting data will be saved.
task             str       M                          ‘M’ for multivariate, ‘S’ for univariate and ‘MS’ for multivariate input with univariate output
fcst_horizon     NoneType  None                       Number of steps forecasted into the future. If None, the default is applied.
fcst_history     NoneType  None                       Number of historical steps used as input. If None, the minimum default is applied.
preprocess       bool      True                       Flag that indicates whether the data is preprocessed before saving.
force_download   bool      False                      Flag that indicates if the data should be downloaded again even if the directory exists.
remove_zip       bool      False                      Flag that indicates if the zip file should be removed after extracting the data.
return_df        bool      True                       Flag that indicates whether a dataframe (True) or X and y arrays (False) are returned.
show_plot        bool      True                       Plot the splits.
dtype            type      float32
verbose          bool      True                       Flag to indicate the verbosity.
kwargs
+
+
dsid = "ILI"
+try:
+    df = get_long_term_forecasting_data(dsid, target_dir='./data/forecasting/', force_download=False)
+    print(f"{dsid:15}: {str(df.shape):15}")
+    del df; gc.collect()
+    remove_dir('./data/forecasting/', False)
+except Exception as e:
+    print(f"{dsid:15}: {str(e):15}")
+
100.01% [54001664/53995526 00:09<00:00]
+
/Users/nacho/opt/anaconda3/envs/py39t20/lib/python3.9/site-packages/fastai/tabular/core.py:23: UserWarning: The argument 'infer_datetime_format' is deprecated and will be removed in a future version. A strict version of it is now the default, see https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. You can safely remove this argument.
+  df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)
+
+
+
ILI            : (966, 8)       
+
+
+
+
dsid = "ILI"
+try:
+    X, y, splits, stats = get_long_term_forecasting_data(dsid, target_dir='./data/forecasting/', force_download=False, return_df=False, show_plot=False)
+    print(f"{dsid:15} -  X.shape: {str(X.shape):20}  y.shape: {str(y.shape):20}  splits: {str([len(s) for s in splits]):25}  \
+stats: {str([s.shape for s in stats]):30}")
+    del X, y, splits, stats
+    gc.collect()
+    remove_dir('./data/forecasting/', False)
+except Exception as e:
+    print(f"{dsid:15}: {str(e):15}")
+
100.01% [54001664/53995526 00:09<00:00]
+
/Users/nacho/opt/anaconda3/envs/py39t20/lib/python3.9/site-packages/fastai/tabular/core.py:23: UserWarning: The argument 'infer_datetime_format' is deprecated and will be removed in a future version. A strict version of it is now the default, see https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. You can safely remove this argument.
+  df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)
+
+
+
ILI             -  X.shape: (839, 7, 104)         y.shape: (839, 7, 24)          splits: [549, 74, 170]             stats: [(1, 7, 1), (1, 7, 1)]        
+
+
\ No newline at end of file diff --git a/data.features.html b/data.features.html new file mode 100644 index 000000000..4b1f8e4bf --- /dev/null +++ b/data.features.html @@ -0,0 +1,3994 @@ +tsai - Featurizing Time Series
+
+
+

Featurizing Time Series

+
+

Functions used to transform time series into a dataframe that can be used to create tabular dataloaders.

+
+

In this case we are using tsfresh, one of the best-known libraries for creating features from time series. You can get more details about this library here: https://tsfresh.readthedocs.io/en/latest/

+
+

source

+
+

get_ts_features

+
+
 get_ts_features (X:Union[numpy.ndarray,torch.Tensor],
+                  y:Union[NoneType,numpy.ndarray,torch.Tensor]=None,
+                  features:Union[str,dict]='min',
+                  n_jobs:Optional[int]=None, **kwargs)
+
+

Args: X: np.array or torch.Tensor of shape [samples, dimensions, timesteps]. y: not required for unlabeled data; otherwise, you need to pass it. features: ‘min’, ‘efficient’, ‘all’, or a dictionary. Be aware that ‘efficient’ and ‘all’ may require substantial memory and time.

+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, return_split=False)
+X.shape
+
+
(360, 24, 51)
+
+
+

There are 3 levels of features you can extract: ‘min’, ‘efficient’ and ‘all’. I’d encourage you to start with ‘min’, as feature creation may take a long time.

+

In addition to this, you can pass a dictionary to build the desired features (see the tsfresh documentation linked above).
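For instance, a hedged sketch of passing a custom feature dictionary (the keys follow tsfresh’s default_fc_parameters format; that get_ts_features forwards the dictionary in exactly this way is an assumption based on its signature):

custom_features = {
    "mean": None,                            # calculators without parameters take None
    "standard_deviation": None,
    "quantile": [{"q": 0.25}, {"q": 0.75}],  # parameterized calculators take a list of dicts
}
ts_features_df = get_ts_features(X, y, features=custom_features)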

+
+
ts_features_df = get_ts_features(X, y)
+ts_features_df.shape
+
+
Feature Extraction: 100%|██████████| 40/40 [00:09<00:00,  4.00it/s]
+
+
+
(360, 241)
+
+
+

The ‘min’ set creates a dataframe with 10 features per channel plus 1 target column (241 in total for this 24-channel dataset) for each of the 360 time series samples.

+
+
cont_names = ts_features_df.columns[:-1]
+y_names = 'target'
+dls = get_tabular_dls(ts_features_df, splits=splits, cont_names=cont_names, y_names=y_names)
+dls.show_batch()
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
[Extracted-features dataframe preview: 10 rows × 241 columns. For each of the 24 channels (0-23) the columns are <ch>__sum_values, <ch>__median, <ch>__mean, <ch>__length, <ch>__standard_deviation, <ch>__variance, <ch>__root_mean_square, <ch>__maximum, <ch>__absolute_maximum and <ch>__minimum, followed by a final target column.]
+
+
+
+
x_cat, x_cont, yb = first(dls.train)
+x_cont[:10]
+
+
tensor([[-0.8581, -0.1346, -0.8581,  ...,  0.3235,  0.5841, -0.6917],
+        [-0.0406,  0.0344, -0.0406,  ..., -0.0171, -1.7253,  1.2745],
+        [ 1.7966, -0.0497,  1.7966,  ..., -0.6516,  0.9802, -1.0290],
+        ...,
+        [ 0.2804,  0.4747,  0.2804,  ..., -0.4816, -0.3325,  0.0887],
+        [-0.1472, -0.1884, -0.1472,  ..., -0.1036, -1.2258,  0.8491],
+        [-0.9960, -0.6116, -0.9960,  ...,  0.4602,  3.3105, -3.0129]])
+
+
+ + +
+ +
+ +
\ No newline at end of file diff --git a/data.image.html b/data.image.html new file mode 100644 index 000000000..880407c3b --- /dev/null +++ b/data.image.html @@ -0,0 +1,1500 @@ +tsai - Imaging Time Series
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Imaging Time Series

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Main functions used to transform time series into TSImage tensors.

+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, return_split=False)
+
+
+

source

+
+

ToTSImage

+
+
 ToTSImage (enc=None, dec=None, split_idx=None, order=None)
+
+

Delegates (__call__,decode,setup) to (encodes,decodes,setups) if split_idx matches

+
+

source

+
+
+

TSImage

+
+
 TSImage (x, **kwargs)
+
+

A Tensor which supports subclass pickling, and maintains metadata when casting or after methods

+
+
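Since TSImage is a regular tensor subclass, it can be created directly from any image-shaped tensor. A minimal sketch (the random data below is purely illustrative; shapes and values are not taken from the original notebook):

import torch
img = TSImage(torch.rand(3, 224, 224))  # cast a (channels, height, width) tensor to TSImage
print(type(img), img.shape)              # type and metadata are preserved by further operations
img.show()                               # TSImage objects can be displayed with .show(), as in the cells below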

source

+
+
+

TSToPlot

+
+
 TSToPlot (size:Optional[int]=224, dpi:int=100, lw=1, **kwargs)
+
+

Transforms a time series batch to a 4d TSImage (bs, 3, size, size) by creating a matplotlib plot (the resulting image always has 3 RGB channels, as shown in the example below).

+
+
out = TSToPlot()(TSTensor(X[:2]), split_idx=0)
+print(out.shape)
+out[0].show()
+
+
torch.Size([2, 3, 224, 224])
+
+
+
+
+

+
+
+
+
+
+

source

+
+
+

TSToMat

+
+
 TSToMat (size=224, dpi=100, cmap=None, **kwargs)
+
+

Transforms a time series batch to a 4d TSImage (bs, 3, size, size) by creating a matplotlib matrix (again 3 RGB channels). Input data must be normalized within the range (-1, 1).

+
+
out = TSToMat()(TSTensor(X[:2]), split_idx=0)
+print(out.shape)
+out[0].show()
+
+
torch.Size([2, 3, 224, 224])
+
+
+
+
+

+
+
+
+
+
+
out = TSToMat(cmap='spring')(TSTensor(X[:2]), split_idx=0)
+print(out.shape)
+out[0].show()
+
+
torch.Size([2, 3, 224, 224])
+
+
+
+
+

+
+
+
+
+
+

source

+
+
+

TSToJRP

+
+
 TSToJRP (size=224, cmap=None, dimension=1, time_delay=1, threshold=None,
+          percentage=10)
+
+

Transforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Joint Recurrence Plot

+
+
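TSToJRP follows the same call pattern as the transforms above; a minimal sketch using the NATOPS batch loaded earlier (output values not reproduced here):

out = TSToJRP()(TSTensor(X[:2]), split_idx=0)  # joint recurrence plot, default size=224
print(out.shape)                               # expected: (2, n_vars, 224, 224) per the docstring
out[0].show()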

source

+
+
+

TSToRP

+
+
 TSToRP (size=224, cmap=None, dimension=1, time_delay=1, threshold=None,
+         percentage=10, flatten=False)
+
+

Transforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Recurrence Plot. It requires input to be previously normalized between -1 and 1

+
+

source

+
+
+

TSToMTF

+
+
 TSToMTF (size=224, cmap=None, n_bins=5, image_size=1.0,
+          strategy='quantile', overlapping=False, flatten=False)
+
+

Transforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Markov Transition Field

+
+
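TSToMTF can be applied in the same way; a minimal sketch with the default settings (n_bins=5, strategy='quantile'), again using the NATOPS batch loaded above:

out = TSToMTF()(TSTensor(X[:2]), split_idx=0)  # Markov Transition Field with default settings
print(out.shape)
out[0].show()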

source

+
+
+

TSToGASF

+
+
 TSToGASF (size=224, cmap=None, range=None, image_size=1.0,
+           sample_range=(-1, 1), method='summation', overlapping=False,
+           flatten=False)
+
+

Transforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Gramian Angular Summation Field. It requires the input either to be previously normalized between -1 and 1 or to set range to (-1, 1)

+
+
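TSToGASF can be applied with the same pattern; a minimal sketch that sets range=(-1, 1), as the docstring above suggests when the input has not been normalized beforehand:

out = TSToGASF(range=(-1, 1))(TSTensor(X[:2]), split_idx=0)  # range=(-1, 1) handles unnormalized input
print(out.shape)
out[0].show()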

source

+
+
+

TSToGADF

+
+
 TSToGADF (size=224, cmap=None, range=None, image_size=1.0,
+           sample_range=(-1, 1), method='summation', overlapping=False,
+           flatten=False)
+
+

Transforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Gramian Angular Difference Field. It requires the input either to be previously normalized between -1 and 1 or to set range to (-1, 1)

+
+
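The same pattern works for TSToGADF; a minimal sketch (shapes and values not reproduced here):

out = TSToGADF(range=(-1, 1))(TSTensor(X[:2]), split_idx=0)  # Gramian Angular Difference Field
print(out.shape)
out[0].show()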
out = TSToRP()(TSTensor(X[:2]), split_idx=0)
+print(out.shape)
+out[0].show()
+
+
torch.Size([2, 24, 224, 224])
+
+
+
+
+

+
+
+
+
+
+
o = TSTensor(X[0][1][None])
+encoder = RecurrencePlot()
+a = encoder.fit_transform(o.cpu().numpy())[0]
+o = TSTensor(X[0])
+encoder = RecurrencePlot()
+b = encoder.fit_transform(o.cpu().numpy())[1]
+test_eq(a,b) # channels can all be processed in parallel
+
+
+
test_eq(TSToRP()(TSTensor(X[0]), split_idx=False)[0], TSToRP()(TSTensor(X[0][0][None]), split_idx=False)[0])
+test_eq(TSToRP()(TSTensor(X[0]), split_idx=False)[1], TSToRP()(TSTensor(X[0][1][None]), split_idx=False)[0])
+test_eq(TSToRP()(TSTensor(X[0]), split_idx=False)[2], TSToRP()(TSTensor(X[0][2][None]), split_idx=False)[0])
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, return_split=False)
+tfms = [None, Categorize()]
+bts = [[TSNormalize(), TSToPlot(100)],
+       [TSNormalize(), TSToMat(100)],
+       [TSNormalize(), TSToGADF(100)],
+       [TSNormalize(), TSToGASF(100)],
+       [TSNormalize(), TSToMTF(100)],
+       [TSNormalize(), TSToRP(100)]]
+btns = ['Plot', 'Mat', 'GADF', 'GASF', 'MTF', 'RP']
+dsets = TSDatasets(X, y, tfms=tfms, splits=splits)
+for i, (bt, btn) in enumerate(zip(bts, btns)):
+    dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=8, batch_tfms=bt)
+    test_eq(dls.vars, 3 if i <2 else X.shape[1])
+    test_eq(dls.len, (100,100))
+    xb, yb = dls.train.one_batch()
+    print(i, btn, xb, xb.dtype, xb.min(), xb.max())
+    xb[0].show()
+    plt.show()
+
+
0 Plot TSImage(shape:torch.Size([8, 3, 100, 100])) torch.float32 0.054901961237192154 1.0
+1 Mat TSImage(shape:torch.Size([8, 3, 100, 100])) torch.float32 0.019607843831181526 1.0
+2 GADF TSImage(shape:torch.Size([8, 24, 100, 100])) torch.float32 2.980232238769531e-07 0.9999997019767761
+3 GASF TSImage(shape:torch.Size([8, 24, 100, 100])) torch.float32 0.0 0.938302218914032
+4 MTF TSImage(shape:torch.Size([8, 24, 100, 100])) torch.float32 0.0 1.0
+5 RP TSImage(shape:torch.Size([8, 24, 100, 100])) torch.float32 0.0 0.8106333613395691
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+

The simplest way to train a model using time series to image transforms is this:

+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, return_split=False)
+tfms = [None, Categorize()]
+batch_tfms = [TSNormalize(), TSToGADF(224)]
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+learn = tsimage_learner(dls, xresnet34)
+learn.fit_one_cycle(10)
+ + +
+ +
+ +
\ No newline at end of file diff --git a/data.metadatasets.html b/data.metadatasets.html new file mode 100644 index 000000000..c84d988f6 --- /dev/null +++ b/data.metadatasets.html @@ -0,0 +1,1333 @@ +tsai - Metadataset
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Metadataset

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

A dataset of datasets

+
+

This functionality allows you to create a dataset from data stored in multiple, smaller datasets.

+

I’d like to thank both Thomas Capelle (https://github.com/tcapelle) and Xander Dunn (https://github.com/xanderdunn) for their contributions to make this code possible.

+

This functionality allows you to use multiple numpy arrays instead of a single one, which may be very useful in many practical settings. It’s been tested with 10k+ datasets and it works well.

+
+

source

+
+

TSMetaDatasets

+
+
 TSMetaDatasets (metadataset, splits)
+
+

Base class for lists with subsets

+
+

source

+
+
+

TSMetaDataset

+
+
 TSMetaDataset (dataset_list, **kwargs)
+
+

Initialize self. See help(type(self)) for accurate signature.

+

Let’s create 3 datasets. In this case they will have different sizes.

+
+
vocab = alphabet[:10]
+dsets = []
+for i in range(3):
+    size = np.random.randint(50, 150)
+    X = torch.rand(size, 5, 50)
+    y = vocab[torch.randint(0, 10, (size,))]
+    tfms = [None, TSClassification(vocab=vocab)]
+    dset = TSDatasets(X, y, tfms=tfms)
+    dsets.append(dset)
+
+
+
+metadataset = TSMetaDataset(dsets)
+splits = TimeSplitter(show_plot=False)(metadataset)
+metadatasets = TSMetaDatasets(metadataset, splits=splits)
+dls = TSDataLoaders.from_dsets(metadatasets.train, metadatasets.valid)
+xb, yb = dls.train.one_batch()
+xb, yb
+
+
(TSTensor(samples:64, vars:5, len:50, device=cpu, dtype=torch.float32),
+ TensorCategory([1, 0, 3, 9, 7, 2, 8, 6, 1, 1, 1, 8, 1, 1, 9, 2, 6, 6, 1, 5, 5,
+                 6, 9, 2, 7, 1, 6, 4, 9, 2, 5, 0, 4, 9, 1, 4, 4, 6, 0, 8, 8, 5,
+                 8, 6, 9, 0, 8, 8, 6, 4, 8, 9, 7, 3, 4, 7, 7, 8, 6, 2, 3, 0, 7,
+                 4]))
+
+
+

You can train metadatasets as you would train any other time series model in tsai:

+
learn = ts_learner(dls, arch="TSTPlus")
+learn.fit_one_cycle(1)
+learn.export("test.pkl")
+

For inference, you should create the new metadatasets using the same method you used when you trained the model. Then you use fastai’s learn.get_preds method to generate predictions:

+
vocab = alphabet[:10]
+dsets = []
+for i in range(3):
+    size = np.random.randint(50, 150)
+    X = torch.rand(size, 5, 50)
+    y = vocab[torch.randint(0, 10, (size,))]
+    tfms = [None, TSClassification(vocab=vocab)]
+    dset = TSDatasets(X, y, tfms=tfms)
+    dsets.append(dset)
+metadataset = TSMetaDataset(dsets)
+dl = TSDataLoader(metadataset)
+
+
+learn = load_learner("test.pkl")
+learn.get_preds(dl=dl)
+

There is also an easy way to map any particular sample in a batch to its original dataset and index:

+
+
dls = TSDataLoaders.from_dsets(metadatasets.train, metadatasets.valid)
+xb, yb = first(dls.train)
+mappings = dls.train.dataset.mapping_idxs
+for i, (xbi, ybi) in enumerate(zip(xb, yb)):
+    ds, idx = mappings[i]
+    test_close(dsets[ds][idx][0].data.cpu(), xbi.cpu())
+    test_close(dsets[ds][idx][1].data.cpu(), ybi.cpu())
+
+

For example, the 3rd sample in this batch maps to the following (dataset index, sample index) pair:

+
+
dls.train.dataset.mapping_idxs[2]
+
+
array([  0, 112], dtype=int32)
+
+
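That is, the 3rd sample in the batch comes from the first dataset (index 0), sample 112. The original item can be retrieved directly with that mapping; a minimal sketch based on the loop shown above:

ds, idx = dls.train.dataset.mapping_idxs[2]  # (dataset index, sample index) within dsets
orig_x, orig_y = dsets[ds][idx]              # original (x, y) pair for the 3rd sample in the batch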
+ + +
+ +
+ +
\ No newline at end of file diff --git a/data.mixed.html b/data.mixed.html new file mode 100644 index 000000000..b750ffa16 --- /dev/null +++ b/data.mixed.html @@ -0,0 +1,1770 @@ +tsai - Mixed data
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Mixed data

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

DataLoader that can take data from multiple dataloaders with different types of data

+
+
+

source

+
+

MixedDataLoaders

+
+
 MixedDataLoaders (*loaders, path:str|Path='.', device=None)
+
+

Basic wrapper around several DataLoaders.

+
+

source

+
+
+

MixedDataLoader

+
+
 MixedDataLoader (*loaders, path='.', shuffle=False, device=None, bs=None)
+
+

Accepts any number of DataLoaders and a device

+
+

source

+
+
+

get_mixed_dls

+
+
 get_mixed_dls (*dls, device=None, shuffle_train=None, shuffle_valid=None,
+                **kwargs)
+
+
+
from tsai.data.tabular import *
+
+
+
path = untar_data(URLs.ADULT_SAMPLE)
+df = pd.read_csv(path/'adult.csv')
+# df['salary'] = np.random.rand(len(df)) # uncomment to simulate a cont dependent variable
+target = 'salary'
+splits = RandomSplitter()(range_of(df))
+
+cat_names = ['workclass', 'education', 'marital-status']
+cont_names = ['age', 'fnlwgt']
+dls1 = get_tabular_dls(df, cat_names=cat_names, cont_names=cont_names, y_names=target, splits=splits, bs=512)
+dls1.show_batch()
+
+cat_names = None #['occupation', 'relationship', 'race']
+cont_names = ['education-num']
+dls2 = get_tabular_dls(df, cat_names=cat_names, cont_names=cont_names, y_names=target, splits=splits, bs=128)
+dls2.show_batch()
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
workclasseducationmarital-statusagefnlwgtsalary
0PrivateBachelorsMarried-civ-spouse59.999999131680.999115>=50k
1Private12thNever-married18.000000311795.000052<50k
2PrivateHS-gradMarried-civ-spouse45.000000350440.002257>=50k
3Local-govMastersNever-married44.000000101593.001253<50k
4?Some-collegeNever-married20.99999941355.995576<50k
5PrivateBachelorsNever-married30.000000207668.000292<50k
6Federal-govBachelorsNever-married28.000000281859.998606<50k
7?Some-collegeNever-married20.999999180338.999810<50k
8PrivateSome-collegeNever-married20.000000174713.999509<50k
9Self-emp-not-incBachelorsMarried-civ-spouse50.000000334273.005863<50k
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
education-num_naeducation-numsalary
0False9.0<50k
1False9.0<50k
2False13.0>=50k
3False9.0<50k
4False9.0<50k
5False13.0>=50k
6False10.0<50k
7False10.0<50k
8False13.0<50k
9False10.0<50k
+
+
+
+
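For reference, based only on the signature shown above, a MixedDataLoader could presumably also be built directly from the individual train DataLoaders (a hypothetical sketch, not verified against the library internals); get_mixed_dls, used next, is the convenience wrapper that handles the train/valid pairing for you:

mixed_train_dl = MixedDataLoader(dls1.train, dls2.train, bs=8, shuffle=True)  # hypothetical direct construction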
dls = get_mixed_dls(dls1, dls2, bs=8)
+first(dls.train)
+first(dls.valid)
+torch.save(dls,'export/mixed_dls.pth')
+del dls
+dls = torch.load('export/mixed_dls.pth')
+dls.train.show_batch()
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
workclasseducationmarital-statusagefnlwgtsalary
0State-govHS-gradNever-married43.00000023156.998049<50k
1Private11thMarried-civ-spouse32.000000140092.001434<50k
2Self-emp-not-incHS-gradNever-married43.00000048086.995399<50k
3Self-emp-not-incAssoc-acdmNever-married34.000000177638.999728<50k
4Local-govMastersMarried-civ-spouse65.000001146453.999176<50k
5PrivateHS-gradMarried-civ-spouse33.000000227281.999333<50k
6PrivateHS-gradNever-married33.000000194900.999911<50k
7PrivateHS-gradDivorced23.000000259301.002460<50k
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
education-num_naeducation-numsalary
0False9.0<50k
1False7.0<50k
2False9.0<50k
3False12.0<50k
4False14.0<50k
5True10.0<50k
6False9.0<50k
7False9.0<50k
+
+
+
+
xb, yb = first(dls.train)
+xb
+
+
((tensor([[ 8, 12,  5],
+          [ 5,  2,  3],
+          [ 7, 12,  5],
+          [ 7,  8,  5],
+          [ 3, 13,  3],
+          [ 5, 12,  3],
+          [ 5, 12,  5],
+          [ 5, 12,  1]]),
+  tensor([[ 0.3222, -1.5782],
+          [-0.4850, -0.4696],
+          [ 0.3222, -1.3418],
+          [-0.3383, -0.1136],
+          [ 1.9368, -0.4093],
+          [-0.4117,  0.3570],
+          [-0.4117,  0.0500],
+          [-1.1455,  0.6606]])),
+ (tensor([[1],
+          [1],
+          [1],
+          [1],
+          [1],
+          [2],
+          [1],
+          [1]]),
+  tensor([[-0.4258],
+          [-1.2097],
+          [-0.4258],
+          [ 0.7502],
+          [ 1.5342],
+          [-0.0338],
+          [-0.4258],
+          [-0.4258]])))
+
+
+
+
xs, ys = first(dls.train)
+xs[0][0].shape, xs[0][1].shape, xs[1][0].shape, xs[1][1].shape
+
+
(torch.Size([8, 3]),
+ torch.Size([8, 2]),
+ torch.Size([8, 1]),
+ torch.Size([8, 1]))
+
+
+
+
from tsai.data.validation import TimeSplitter
+from tsai.data.core import TSRegression, get_ts_dls
+
+
+
X = np.repeat(np.repeat(np.arange(8)[:, None, None], 2, 1), 5, 2).astype(float)
+X = np.concatenate([X, X])
+y = np.concatenate([np.arange(len(X)//2)]*2)
+alphabet = np.array(list(string.ascii_lowercase))
+# y = alphabet[y]
+splits = TimeSplitter(.5, show_plot=False)(range_of(X))
+tfms = [None, TSRegression()]
+dls1 = get_ts_dls(X, y, splits=splits, tfms=tfms)
+dls1.one_batch()
+
+
(TSTensor(samples:8, vars:2, len:5, device=cpu, dtype=torch.float32),
+ tensor([7., 0., 2., 1., 5., 4., 3., 6.]))
+
+
+
+
data = np.concatenate([np.repeat(np.arange(8)[:, None], 3, 1)*np.array([1, 10, 100])]*2)
+df = pd.DataFrame(data, columns=['cat1', 'cat2', 'cont'])
+df['cont'] = df['cont'].astype(float)
+df['target'] = y
+cat_names = ['cat1', 'cat2']
+cont_names = ['cont']
+target = 'target'
+dls2 = get_tabular_dls(df, procs=[Categorify, FillMissing, #Normalize
+                                 ], cat_names=cat_names, cont_names=cont_names, y_names=target, splits=splits, bs=8)
+dls2.one_batch()
+
+
(tensor([[2, 2],
+         [5, 5],
+         [1, 1],
+         [7, 7],
+         [3, 3],
+         [6, 6],
+         [8, 8],
+         [4, 4]]),
+ tensor([[100.],
+         [400.],
+         [  0.],
+         [600.],
+         [200.],
+         [500.],
+         [700.],
+         [300.]]),
+ tensor([[1],
+         [4],
+         [0],
+         [6],
+         [2],
+         [5],
+         [7],
+         [3]], dtype=torch.int8))
+
+
+
+
z = zip(_loaders[dls1.train.fake_l.num_workers == 0](dls1.train.fake_l))
+for b in z: 
+    print(b)
+    break
+
+
((TSTensor(samples:8, vars:2, len:5, device=cpu, dtype=torch.float32), tensor([7., 0., 2., 1., 5., 4., 3., 6.])),)
+
+
+
+
bs = 8
+dls = get_mixed_dls(dls1, dls2, bs=bs)
+dl = dls.train
+xb, yb = dl.one_batch()
+test_eq(len(xb), 2)
+test_eq(len(xb[0]), bs)
+test_eq(len(xb[1]), 2)
+test_eq(len(xb[1][0]), bs)
+test_eq(len(xb[1][1]), bs)
+test_eq(xb[0].data[:, 0, 0].long(), xb[1][0][:, 0] - 1) # categorical data and ts are in synch
+test_eq(xb[0].data[:, 0, 0], (xb[1][1]/100).flatten()) # continuous data and ts are in synch
+test_eq(tensor(dl.input_idxs), yb.long().cpu())
+dl = dls.valid
+xb, yb = dl.one_batch()
+test_eq(tensor(y[dl.input_idxs]), yb.long().cpu())
+
+ + +
+ +
+ +
\ No newline at end of file diff --git a/data.mixed_augmentation.html b/data.mixed_augmentation.html new file mode 100644 index 000000000..4e88a439b --- /dev/null +++ b/data.mixed_augmentation.html @@ -0,0 +1,1434 @@ +tsai - Label-mixing transforms
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Label-mixing transforms

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Callbacks that perform data augmentation by mixing samples in different ways.

+
+
+

source

+
+

MixHandler1d

+
+
 MixHandler1d (alpha=0.5)
+
+

A handler class for implementing mixed sample data augmentation

+
+

source

+
+
+

MixUp1d

+
+
 MixUp1d (alpha=0.4)
+
+

Implementation of https://arxiv.org/abs/1710.09412

+
+
from fastai.learner import *
+from tsai.models.InceptionTime import *
+from tsai.data.external import get_UCR_data
+from tsai.data.core import get_ts_dls, TSCategorize
+from tsai.data.preprocessing import TSStandardize
+from tsai.learner import ts_learner
+
+
+
X, y, splits = get_UCR_data('NATOPS', return_split=False)
+tfms = [None, TSCategorize()]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms)
+learn = ts_learner(dls, InceptionTime, cbs=MixUp1d(0.4))
+learn.fit_one_cycle(1)
+
+ + +
+
+ + + + + + + + + + + + + + + + + +
epochtrain_lossvalid_losstime
01.9084551.81190800:03
+
+
+
+

source

+
+
+

CutMix1d

+
+
 CutMix1d (alpha=1.0)
+
+

Implementation of https://arxiv.org/abs/1905.04899

+
+

source

+
+
+

IntraClassCutMix1d

+
+
 IntraClassCutMix1d (alpha=1.0)
+
+

Implementation of CutMix applied to examples of the same class

+
+
X, y, splits = get_UCR_data('NATOPS', split_data=False)
+tfms = [None, TSCategorize()]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms)
+learn = ts_learner(dls, InceptionTime, cbs=IntraClassCutMix1d())
+learn.fit_one_cycle(1)
+
+ + +
+
+ + + + + + + + + + + + + + + + + +
epochtrain_lossvalid_losstime
01.8134831.79201000:03
+
+
+
+
X, y, splits = get_UCR_data('NATOPS', split_data=False)
+tfms = [None, TSCategorize()]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms)
+learn = ts_learner(dls, cbs=CutMix1d(1.))
+learn.fit_one_cycle(1)
+
+ + +
+
+ + + + + + + + + + + + + + + + + +
epochtrain_lossvalid_losstime
01.8245091.77496400:04
+
+
+ + +
+ +
+ +
\ No newline at end of file diff --git a/data.preparation.html b/data.preparation.html new file mode 100644 index 000000000..4e4ddf4d0 --- /dev/null +++ b/data.preparation.html @@ -0,0 +1,8580 @@ +tsai - Data preparation
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Data preparation

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Functions required to prepare X (and y) from a pandas dataframe.

+
+
+

source

+
+

apply_sliding_window

+
+
 apply_sliding_window (data, window_len:Union[int,list],
+                       horizon:Union[int,list]=0,
+                       x_vars:Union[int,list]=None,
+                       y_vars:Union[int,list]=None)
+
+

Applies a sliding window on an array-like input to generate a 3d X (and optionally y)

              Type        Default  Details
data                               an array-like object with the input data
window_len    int | list           sliding window length. When using a list, use negative numbers and 0.
horizon       int | list  0        horizon
x_vars        int | list  None     indices of the independent variables
y_vars        int | list  None     indices of the dependent variables (target). [] means no y will be created. None means all variables.
+
+

source

+
+
+

prepare_sel_vars_and_steps

+
+
 prepare_sel_vars_and_steps (sel_vars=None, sel_steps=None, idxs=False)
+
+
+

source

+
+
+

prepare_idxs

+
+
 prepare_idxs (o, shape=None)
+
+
+
data = np.arange(20).reshape(-1,1).repeat(3, 1) * np.array([1, 10, 100])
+df = pd.DataFrame(data, columns=['feat_1', 'feat_2', 'feat_3'])
+df.head()
+
+
   feat_1  feat_2  feat_3
0       0       0       0
1       1      10     100
2       2      20     200
3       3      30     300
4       4      40     400
+ +
+
+
+
+
window_len = 8
+horizon = 1
+x_vars = None
+y_vars = None
+X, y = apply_sliding_window(data, window_len, horizon=horizon, x_vars=x_vars, y_vars=y_vars)
+print(np.shares_memory(X, data))
+print(np.shares_memory(y, data))
+print(X.shape, y.shape)
+test_eq(X.shape, (len(df) - (window_len - 1 + horizon), df.shape[1], window_len))
+test_eq(y.shape, (len(df) - (window_len - 1 + horizon), df.shape[1]))
+X[0], y[0]
+
+
True
+True
+(12, 3, 8) (12, 3)
+
+
+
(array([[  0,   1,   2,   3,   4,   5,   6,   7],
+        [  0,  10,  20,  30,  40,  50,  60,  70],
+        [  0, 100, 200, 300, 400, 500, 600, 700]]),
+ array([  8,  80, 800]))
+
+
+
+
window_len = 8
+horizon = 1
+x_vars = None
+y_vars = 0
+X, y = apply_sliding_window(df, window_len, horizon=horizon, x_vars=x_vars, y_vars=y_vars)
+print(np.shares_memory(X, df))
+print(np.shares_memory(y, df))
+print(X.shape, y.shape)
+test_eq(X.shape, (len(df) - (window_len - 1 + horizon), df.shape[1], window_len))
+test_eq(y.shape, (len(df) - (window_len - 1 + horizon),))
+X[0], y[0]
+
+
True
+True
+(12, 3, 8) (12,)
+
+
+
(array([[  0,   1,   2,   3,   4,   5,   6,   7],
+        [  0,  10,  20,  30,  40,  50,  60,  70],
+        [  0, 100, 200, 300, 400, 500, 600, 700]]),
+ 8)
+
+
+
+
window_len = 8
+horizon = [1, 2]
+x_vars = 0
+y_vars = [1, 2]
+X, y = apply_sliding_window(df, window_len, horizon=horizon, x_vars=x_vars, y_vars=y_vars)
+print(np.shares_memory(X, df))
+print(np.shares_memory(y, df))
+print(X.shape, y.shape)
+test_eq(X.shape, (len(df) - (window_len - 1 + max(horizon)), 1, window_len))
+test_eq(y.shape, (len(df) - (window_len - 1 + max(horizon)), len(y_vars), len(horizon)))
+X[0], y[0]
+
+
True
+False
+(11, 1, 8) (11, 2, 2)
+
+
+
(array([[0, 1, 2, 3, 4, 5, 6, 7]]),
+ array([[ 80,  90],
+        [800, 900]]))
+
+
+
+
window_len = [-4, -2, -1, 0]
+horizon = [1, 2, 4]
+x_vars = 0
+y_vars = [1, 2]
+X, y = apply_sliding_window(df, window_len, horizon=horizon, x_vars=x_vars, y_vars=y_vars)
+print(np.shares_memory(X, df))
+print(np.shares_memory(y, df))
+print(X.shape, y.shape)
+test_eq(X.shape, (12, 1, 4))
+test_eq(y.shape, (12, 2, 3))
+X[0], y[0]
+
+
False
+False
+(12, 1, 4) (12, 2, 3)
+
+
+
(array([[0, 2, 3, 4]]),
+ array([[ 50,  60,  80],
+        [500, 600, 800]]))
+
+
+
+

source

+
+
+

df2Xy

+
+
 df2Xy (df, sample_col=None, feat_col=None, data_cols=None,
+        target_col=None, steps_in_rows=False, to3d=True, splits=None,
+        sort_by=None, ascending=True, y_func=None, return_names=False)
+
+

This function allows you to transform a pandas dataframe into X and y numpy arrays that can be used to create a TSDataset.
sample_col: column that uniquely identifies each sample.
feat_col: used for multivariate datasets. It indicates the column that identifies the feature in each row.
data_cols: indicates the column/s where the data is located. If None, it means all columns (except the sample_col, feat_col, and target_col).
target_col: indicates the column/s where the target is.
steps_in_rows: flag to indicate if each step is in a different row or in a different column (default).
to3d: turns X to 3d (including univariate time series).
sort_by: used to pass any column/s that are needed to sort the steps in the sequence. If you pass a sample_col and/or feat_col these will be automatically used before the sort_by column/s, and you don’t need to add them to the sort_by column/s list.
y_func: function used to calculate y for each sample (and target_col).
return_names: flag to return the names of the columns from where X was generated

+
+

source

+
+
+

split_Xy

+
+
 split_Xy (X, y=None, splits=None)
+
+
+
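split_Xy has no usage example in this notebook; here is a minimal, hypothetical sketch, assuming it simply returns the X (and y) subsets that correspond to each split, in order:

X_toy = np.random.rand(10, 3, 20)                  # 10 samples, 3 variables, 20 steps (illustrative data)
y_toy = np.random.randint(0, 2, 10)
toy_splits = (list(range(8)), list(range(8, 10)))  # hypothetical train/valid index lists
out = split_Xy(X_toy, y_toy, toy_splits)
for o in out:
    print(getattr(o, 'shape', o))                  # print the shape of each returned subset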
df = pd.DataFrame()
+df['sample_id'] = np.array([1,1,1,2,2,2,3,3,3])
+df['var1'] = df['sample_id'] * 10 + df.index.values
+df['var2'] = df['sample_id'] * 100 + df.index.values
+df
+
+
   sample_id  var1  var2
0          1    10   100
1          1    11   101
2          1    12   102
3          2    23   203
4          2    24   204
5          2    25   205
6          3    36   306
7          3    37   307
8          3    38   308
+ +
+
+
+
+
X_df, y_df = df2Xy(df, sample_col='sample_id', steps_in_rows=True)
+test_eq(X_df[0], np.array([[10, 11, 12], [100, 101, 102]]))
+
+
+
n_samples = 1_000
+n_rows = 10_000
+
+sample_ids = np.arange(n_samples).repeat(n_rows//n_samples).reshape(-1,1)
+feat_ids = np.tile(np.arange(n_rows // n_samples), n_samples).reshape(-1,1)
+cont = np.random.randn(n_rows, 6)
+ind_cat = np.random.randint(0, 3, (n_rows, 1))
+target = np.array([0,1,2])[ind_cat]
+ind_cat2 = np.random.randint(0, 3, (n_rows, 1))
+target2 = np.array([100,200,300])[ind_cat2]
+data = np.concatenate([sample_ids, feat_ids, cont, target, target], -1)
+columns = ['sample_id', 'feat_id'] + (np.arange(6) + 1).astype(str).tolist() + ['target'] + ['target2']
+df = pd.DataFrame(data, columns=columns)
+idx = random_choice(np.arange(len(df)), len(df), False)
+new_dtypes = {'sample_id':np.int32, 'feat_id':np.int32, '1':np.float32, '2':np.float32, '3':np.float32, '4':np.float32, '5':np.float32, '6':np.float32}
+df = df.astype(dtype=new_dtypes)
+df = df.loc[idx].reset_index(drop=True)
+df
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
sample_idfeat_id123456targettarget2
06252-1.3905490.770179-0.8484800.853631-0.3099840.8743382.02.0
152641.1523972.064397-0.392603-0.275797-0.047526-2.2488142.02.0
23976-1.0529300.631396-0.758800-0.606483-2.776054-0.4577551.01.0
35288-0.178637-1.253319-1.1540140.9138761.051010-0.6357621.01.0
424920.6125950.8882970.0650241.621935-0.1804790.3099771.01.0
.................................
99952721-0.4323251.6452621.502872-1.1448590.9196530.4143040.00.0
99969205-0.724702-1.4718321.2090861.2065320.5556760.3527262.02.0
999766261.122043-0.379357-0.344517-1.5450910.1878941.0625102.02.0
9998717-0.053582-0.854992-1.118632-1.967820-0.3448040.1281050.00.0
99994074-1.565716-0.947183-0.401944-1.309024-0.237755-0.7432512.02.0
+ +

10000 rows × 10 columns

+
+
+
+
+
from scipy.stats import mode
+
+
+
def y_func(o): return mode(o, axis=1, keepdims=True).mode
+X, y = df2xy(df, sample_col='sample_id', feat_col='feat_id', target_col=['target', 'target2'], sort_by=['sample_id', 'feat_id'], y_func=y_func)
+test_eq(X.shape, (1000, 10, 6))
+test_eq(y.shape, (1000, 2))
+rand_idx = np.random.randint(0, np.max(df.sample_id))
+sorted_df = df.sort_values(by=['sample_id', 'feat_id'], kind='stable').reset_index(drop=True)
+test_eq(X[rand_idx], sorted_df[sorted_df.sample_id == rand_idx][['1', '2', '3', '4', '5', '6']].values)
+test_eq(np.squeeze(mode(sorted_df[sorted_df.sample_id == rand_idx][['target', 'target2']].values).mode), y[rand_idx])
+
+
+
# Univariate
+from io import StringIO
+
+
+
TESTDATA = StringIO("""sample_id;value_0;value_1;target
+    rob;2;3;0
+    alice;6;7;1
+    eve;11;12;2
+    """)
+
+df = pd.read_csv(TESTDATA, sep=";")
+display(df)
+X, y = df2Xy(df, sample_col='sample_id', target_col='target', data_cols=['value_0', 'value_1'], sort_by='sample_id')
+test_eq(X.shape, (3, 1, 2))
+test_eq(y.shape, (3,))
+X, y
+
+
  sample_id  value_0  value_1  target
0       rob        2        3       0
1     alice        6        7       1
2       eve       11       12       2
+ +
+
+
+
(array([[[ 6,  7]],
+ 
+        [[11, 12]],
+ 
+        [[ 2,  3]]]),
+ array([1, 2, 0]))
+
+
+
+
# Univariate
+TESTDATA = StringIO("""sample_id;timestep;values;target
+    rob;1;2;0
+    alice;1;6;1
+    eve;1;11;2
+    
+    rob;2;3;0
+    alice;2;7;1
+    eve;2;12;2
+    """)
+
+df = pd.read_csv(TESTDATA, sep=";")
+display(df)
+def y_func(o): return mode(o, axis=1).mode
+X, y = df2xy(df, sample_col='sample_id', target_col='target', data_cols=['values'], sort_by='timestep', to3d=True, y_func=y_func)
+test_eq(X.shape, (3, 1, 2))
+test_eq(y.shape, (3, ))
+print(X, y)
+
+
  sample_id  timestep  values  target
0       rob         1       2       0
1     alice         1       6       1
2       eve         1      11       2
3       rob         2       3       0
4     alice         2       7       1
5       eve         2      12       2
+ +
+
+
+
[[[ 6  7]]
+
+ [[11 12]]
+
+ [[ 2  3]]] [1 2 0]
+
+
+
+
# Multivariate
+TESTDATA = StringIO("""sample_id;trait;value_0;value_1;target
+    rob;green;2;3;0
+    rob;yellow;3;4;0
+    rob;blue;4;5;0
+    rob;red;5;6;0
+    alice;green;6;7;1
+    alice;yellow;7;8;1
+    alice;blue;8;9;1
+    alice;red;9;10;1
+    eve;yellow;11;12;2
+    eve;green;10;11;2
+    eve;blue;12;12;2
+    eve;red;13;14;2
+    """)
+
+df = pd.read_csv(TESTDATA, sep=";")
+idx = random_choice(len(df), len(df), False)
+df = df.iloc[idx]
+display(df)
+def y_func(o): return mode(o, axis=1).mode
+X, y = df2xy(df, sample_col='sample_id', feat_col='trait', target_col='target', data_cols=['value_0', 'value_1'], y_func=y_func)
+print(X, y)
+test_eq(X.shape, (3, 4, 2))
+test_eq(y.shape, (3,))
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
sample_idtraitvalue_0value_1target
9evegreen10112
10eveblue12122
3robred560
0robgreen230
6aliceblue891
2robblue450
1robyellow340
4alicegreen671
7alicered9101
8eveyellow11122
11evered13142
5aliceyellow781
+ +
+
+
+
[[[ 8  9]
+  [ 6  7]
+  [ 9 10]
+  [ 7  8]]
+
+ [[12 12]
+  [10 11]
+  [13 14]
+  [11 12]]
+
+ [[ 4  5]
+  [ 2  3]
+  [ 5  6]
+  [ 3  4]]] [1 2 0]
+
+
+
+
# Multivariate, multi-label
+TESTDATA = StringIO("""sample_id;trait;value_0;value_1;target1;target2
+    rob;green;2;3;0;0
+    rob;yellow;3;4;0;0
+    rob;blue;4;5;0;0
+    rob;red;5;6;0;0
+    alice;green;6;7;1;0
+    alice;yellow;7;8;1;0
+    alice;blue;8;9;1;0
+    alice;red;9;10;1;0
+    eve;yellow;11;12;2;1
+    eve;green;10;11;2;1
+    eve;blue;12;12;2;1
+    eve;red;13;14;2;1
+    """)
+
+df = pd.read_csv(TESTDATA, sep=";")
+display(df)
+def y_func(o): return mode(o, axis=1, keepdims=True).mode
+X, y = df2xy(df, sample_col='sample_id', feat_col='trait', target_col=['target1', 'target2'], data_cols=['value_0', 'value_1'], y_func=y_func)
+test_eq(X.shape, (3, 4, 2))
+test_eq(y.shape, (3, 2))
+print(X, y)
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
sample_idtraitvalue_0value_1target1target2
0robgreen2300
1robyellow3400
2robblue4500
3robred5600
4alicegreen6710
5aliceyellow7810
6aliceblue8910
7alicered91010
8eveyellow111221
9evegreen101121
10eveblue121221
11evered131421
+ +
+
+
+
[[[ 8  9]
+  [ 6  7]
+  [ 9 10]
+  [ 7  8]]
+
+ [[12 12]
+  [10 11]
+  [13 14]
+  [11 12]]
+
+ [[ 4  5]
+  [ 2  3]
+  [ 5  6]
+  [ 3  4]]] [[1 0]
+ [2 1]
+ [0 0]]
+
+
+
+
# Multivariate, unlabeled
+TESTDATA = StringIO("""sample_id;trait;value_0;value_1;target
+    rob;green;2;3;0
+    rob;yellow;3;4;0
+    rob;blue;4;5;0
+    rob;red;5;6;0
+    alice;green;6;7;1
+    alice;yellow;7;8;1
+    alice;blue;8;9;1
+    alice;red;9;10;1
+    eve;yellow;11;12;2
+    eve;green;10;11;2
+    eve;blue;12;12;2
+    eve;red;13;14;2
+    """)
+
+df = pd.read_csv(TESTDATA, sep=";")
+idx = random_choice(len(df), len(df), False)
+df = df.iloc[idx]
+display(df)
+def y_func(o): return mode(o, axis=1, keepdims=True).mode
+X, y = df2xy(df, sample_col='sample_id', feat_col='trait', data_cols=['value_0', 'value_1'], y_func=y_func)
+print(X, y)
+test_eq(X.shape, (3, 4, 2))
+test_eq(y, None)
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
sample_idtraitvalue_0value_1target
11evered13142
3robred560
9evegreen10112
10eveblue12122
6aliceblue891
1robyellow340
4alicegreen671
2robblue450
0robgreen230
8eveyellow11122
7alicered9101
5aliceyellow781
+ +
+
+
+
[[[ 8  9]
+  [ 6  7]
+  [ 9 10]
+  [ 7  8]]
+
+ [[12 12]
+  [10 11]
+  [13 14]
+  [11 12]]
+
+ [[ 4  5]
+  [ 2  3]
+  [ 5  6]
+  [ 3  4]]] None
+
+
+
+
TESTDATA = StringIO("""sample_id;trait;timestep;values;target
+    rob;green;1;2;0
+    rob;yellow;1;3;0
+    rob;blue;1;4;0
+    rob;red;1;5;0
+    alice;green;1;6;1
+    alice;yellow;1;7;1
+    alice;blue;1;8;1
+    alice;red;1;9;1
+    eve;yellow;1;11;2
+    eve;green;1;10;2
+    eve;blue;1;12;2
+    eve;red;1;13;2
+    
+    rob;green;2;3;0
+    rob;yellow;2;4;0
+    rob;blue;2;5;0
+    rob;red;2;6;0
+    alice;green;2;7;1
+    alice;yellow;2;8;1
+    alice;blue;2;9;1
+    alice;red;2;10;1
+    eve;yellow;2;12;2
+    eve;green;2;11;2
+    eve;blue;2;13;2
+    eve;red;2;14;2
+    """)
+
+df = pd.read_csv(TESTDATA, sep=";")
+display(df)
+def y_func(o): return mode(o, axis=1).mode
+X, y = df2xy(df, sample_col='sample_id', feat_col='trait', sort_by='timestep', target_col='target', data_cols=['values'], y_func=y_func)
+print(X, y)
+test_eq(X.shape, (3, 4, 2))
+test_eq(y.shape, (3, ))
+
+
    sample_id  trait   timestep  values  target
0   rob        green   1         2       0
1   rob        yellow  1         3       0
2   rob        blue    1         4       0
3   rob        red     1         5       0
4   alice      green   1         6       1
5   alice      yellow  1         7       1
6   alice      blue    1         8       1
7   alice      red     1         9       1
8   eve        yellow  1         11      2
9   eve        green   1         10      2
10  eve        blue    1         12      2
11  eve        red     1         13      2
12  rob        green   2         3       0
13  rob        yellow  2         4       0
14  rob        blue    2         5       0
15  rob        red     2         6       0
16  alice      green   2         7       1
17  alice      yellow  2         8       1
18  alice      blue    2         9       1
19  alice      red     2         10      1
20  eve        yellow  2         12      2
21  eve        green   2         11      2
22  eve        blue    2         13      2
23  eve        red     2         14      2
+
+
+
[[[ 8  9]
+  [ 6  7]
+  [ 9 10]
+  [ 7  8]]
+
+ [[12 13]
+  [10 11]
+  [13 14]
+  [11 12]]
+
+ [[ 4  5]
+  [ 2  3]
+  [ 5  6]
+  [ 3  4]]] [1 2 0]
+
+
+
+

source

+
+
+

df2np3d

+
+
 df2np3d (df, groupby, data_cols=None)
+
+

Transforms a df (with the same number of rows per group in groupby) to a 3d ndarray

+
+
user = np.array([1,2]).repeat(4).reshape(-1,1)
+val = np.random.rand(8, 3)
+data = np.concatenate([user, val], axis=-1)
+df = pd.DataFrame(data, columns=['user', 'x1', 'x2', 'x3'])
+test_eq(df2np3d(df, ['user'], ['x1', 'x2', 'x3']).shape, (2, 3, 4))
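Based on the shape check above, the result appears to be equivalent to stacking one transposed (n_vars, n_steps) block per group; a hedged manual sketch (not the library code):

# Hedged sketch: stack each group's data block transposed to (n_vars, n_steps)
+manual = np.stack([g[['x1', 'x2', 'x3']].to_numpy().T for _, g in df.groupby('user')])
+test_eq(manual.shape, (2, 3, 4))
+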
+
+
+

source

+
+
+

add_missing_value_cols

+
+
 add_missing_value_cols (df, cols=None, dtype=<class 'float'>,
+                         fill_value=None)
+
+
+
data = np.random.randn(10, 2)
+mask = data > .8
+data[mask] = np.nan
+df = pd.DataFrame(data, columns=['A', 'B'])
+df = add_missing_value_cols(df, cols=None, dtype=float)
+test_eq(df['A'].isnull().sum(), df['missing_A'].sum())
+test_eq(df['B'].isnull().sum(), df['missing_B'].sum())
+df
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ABmissing_Amissing_B
00.476712-0.8807970.00.0
1NaN-1.5172101.00.0
2-1.348997-0.8784410.00.0
3NaN0.2907561.00.0
40.569218-1.4157770.00.0
50.591641-2.1338600.00.0
6NaNNaN1.01.0
7NaN-0.1193971.00.0
8-0.7279880.0572540.00.0
9-0.631352-0.2190280.00.0
+ +
+
+
+
+

source

+
+
+

add_missing_timestamps

+
+
 add_missing_timestamps (df, datetime_col=None, use_index=False,
+                         unique_id_cols=None, groupby=None,
+                         fill_value=nan, range_by_group=True,
+                         start_date=None, end_date=None, freq=None)
+
                 Type       Default  Details
df                                   pandas DataFrame
datetime_col     NoneType   None     column that contains the datetime data (without duplicates within groups)
use_index        bool       False    indicates if the index contains the datetime data
unique_id_cols   NoneType   None     column used to identify unique_ids
groupby          NoneType   None     same as unique_id_cols. Will be deprecated. Kept for compatibility.
fill_value       float      nan      values that will be inserted where missing dates exist. Default: np.nan
range_by_group   bool       True     if True, dates will be filled between min and max dates for each group. Otherwise, between the min and max dates in the df.
start_date       NoneType   None     start date to fill in missing dates (same for all unique_ids)
end_date         NoneType   None     end date to fill in missing dates (same for all unique_ids)
freq             NoneType   None     frequency used to fill in the missing datetime
+
+
# Filling dates between min and max dates
+dates = pd.date_range('2021-05-01', '2021-05-07').values
+data = np.zeros((len(dates), 3))
+data[:, 0] = dates
+data[:, 1] = np.random.rand(len(dates))
+data[:, 2] = np.random.rand(len(dates))
+cols = ['date', 'feature1', 'feature2']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})
+date_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)
+date_df_with_missing_dates
+
+
   date        feature1  feature2
0  2021-05-01  0.537248  0.670897
1  2021-05-03  0.299912  0.421039
2  2021-05-05  0.648372  0.204641
3  2021-05-06  0.017475  0.022183
4  2021-05-07  0.965919  0.470055
+
+
+
+
# No groups
+expected_output_df = date_df.copy()
+expected_output_df.loc[[1,3], ['feature1', 'feature2']] = np.nan
+display(expected_output_df)
+output_df = add_missing_timestamps(date_df_with_missing_dates.copy(), 
+                                   'date', 
+                                   unique_id_cols=None, 
+                                   fill_value=np.nan, 
+                                   range_by_group=False)
+test_eq(output_df, expected_output_df)
+
+
   date        feature1  feature2
0  2021-05-01  0.537248  0.670897
1  2021-05-02  NaN       NaN
2  2021-05-03  0.299912  0.421039
3  2021-05-04  NaN       NaN
4  2021-05-05  0.648372  0.204641
5  2021-05-06  0.017475  0.022183
6  2021-05-07  0.965919  0.470055
+
+
+
+
# Filling dates between min and max dates for each value in groupby column
+dates = pd.date_range('2021-05-01', '2021-05-07').values
+dates = np.concatenate((dates, dates))
+data = np.zeros((len(dates), 4))
+data[:, 0] = dates
+data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
+data[:, 2] = np.random.rand(len(dates))
+data[:, 3] = np.random.rand(len(dates))
+cols = ['date', 'id', 'feature1', 'feature2']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
+date_df_with_missing_dates = date_df.drop([0,1,3,8,11,13]).reset_index(drop=True)
+date_df_with_missing_dates
+
+
   date        id  feature1  feature2
0  2021-05-03  0   0.059398  0.255853
1  2021-05-05  0   0.235536  0.455261
2  2021-05-06  0   0.724423  0.280910
3  2021-05-07  0   0.303682  0.853959
4  2021-05-01  1   0.022424  0.408510
5  2021-05-03  1   0.508190  0.603880
6  2021-05-04  1   0.330924  0.108156
7  2021-05-06  1   0.601481  0.020182
+
+
+
+
# groupby='id', range_by_group=True
+expected_output_df = date_df.drop([0,1,13]).reset_index(drop=True)  
+expected_output_df.loc[[1,6,9], ['feature1', 'feature2']] = np.nan
+display(expected_output_df)
+output_df = add_missing_timestamps(date_df_with_missing_dates.copy(), 
+                                   'date', 
+                                   unique_id_cols='id', 
+                                   fill_value=np.nan, 
+                                   range_by_group=True)
+test_eq(expected_output_df, output_df)
+
+
    date        id  feature1  feature2
0   2021-05-03  0   0.059398  0.255853
1   2021-05-04  0   NaN       NaN
2   2021-05-05  0   0.235536  0.455261
3   2021-05-06  0   0.724423  0.280910
4   2021-05-07  0   0.303682  0.853959
5   2021-05-01  1   0.022424  0.408510
6   2021-05-02  1   NaN       NaN
7   2021-05-03  1   0.508190  0.603880
8   2021-05-04  1   0.330924  0.108156
9   2021-05-05  1   NaN       NaN
10  2021-05-06  1   0.601481  0.020182
+
+
+
+
# groupby='id', range_by_group=False
+expected_output_df = date_df.copy() 
+expected_output_df.loc[[0,1,3,8,11,13], ['feature1', 'feature2']] = np.nan
+display(expected_output_df)
+output_df = add_missing_timestamps(date_df_with_missing_dates.copy(), 
+                                   'date', 
+                                   unique_id_cols='id', 
+                                   fill_value=np.nan, 
+                                   range_by_group=False)
+test_eq(expected_output_df, output_df)
+
+
    date        id  feature1  feature2
0   2021-05-01  0   NaN       NaN
1   2021-05-02  0   NaN       NaN
2   2021-05-03  0   0.059398  0.255853
3   2021-05-04  0   NaN       NaN
4   2021-05-05  0   0.235536  0.455261
5   2021-05-06  0   0.724423  0.280910
6   2021-05-07  0   0.303682  0.853959
7   2021-05-01  1   0.022424  0.408510
8   2021-05-02  1   NaN       NaN
9   2021-05-03  1   0.508190  0.603880
10  2021-05-04  1   0.330924  0.108156
11  2021-05-05  1   NaN       NaN
12  2021-05-06  1   0.601481  0.020182
13  2021-05-07  1   NaN       NaN
+
+
+
+
# Filling dates between min and max timestamps
+dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
+data = np.zeros((len(dates), 3))
+data[:, 0] = dates
+data[:, 1] = np.random.rand(len(dates))
+data[:, 2] = np.random.rand(len(dates))
+cols = ['date', 'feature1', 'feature2']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})
+date_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)
+date_df_with_missing_dates
+
+
   date                 feature1  feature2
0  2021-05-01 00:00:00  0.774846  0.624488
1  2021-05-01 08:00:00  0.683837  0.441230
2  2021-05-01 16:00:00  0.142269  0.279095
3  2021-05-01 20:00:00  0.953686  0.205123
+
+
+
+
# No groups
+expected_output_df = date_df.copy()
+expected_output_df.loc[[1,3], ['feature1', 'feature2']] = np.nan
+display(expected_output_df)
+output_df = add_missing_timestamps(date_df_with_missing_dates.copy(), 'date', groupby=None, fill_value=np.nan, range_by_group=False, freq='4H')
+test_eq(output_df, expected_output_df)
+
+
   date                 feature1  feature2
0  2021-05-01 00:00:00  0.774846  0.624488
1  2021-05-01 04:00:00  NaN       NaN
2  2021-05-01 08:00:00  0.683837  0.441230
3  2021-05-01 12:00:00  NaN       NaN
4  2021-05-01 16:00:00  0.142269  0.279095
5  2021-05-01 20:00:00  0.953686  0.205123
+
+
+
+
# Filling missing values between min and max timestamps for each value in groupby column
+
+dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
+dates = np.concatenate((dates, dates))
+data = np.zeros((len(dates), 4))
+data[:, 0] = dates
+data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
+data[:, 2] = np.random.rand(len(dates))
+data[:, 3] = np.random.rand(len(dates))
+cols = ['date', 'id', 'feature1', 'feature2']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
+date_df_with_missing_dates = date_df.drop([0,1,3,8,9,11]).reset_index(drop=True)
+date_df_with_missing_dates
+
+
   date                 id  feature1  feature2
0  2021-05-01 08:00:00  0   0.438784  0.084472
1  2021-05-01 16:00:00  0   0.059613  0.445215
2  2021-05-01 20:00:00  0   0.511807  0.001034
3  2021-05-01 00:00:00  1   0.970115  0.280121
4  2021-05-01 04:00:00  1   0.775051  0.436359
5  2021-05-01 16:00:00  1   0.469987  0.457442
+
+
+
+
# groupby='id', range_by_group=True
+expected_output_df = date_df.drop([0,1,11]).reset_index(drop=True)  
+expected_output_df.loc[[1,6,7], ['feature1', 'feature2']] = np.nan
+display(expected_output_df)
+output_df = add_missing_timestamps(date_df_with_missing_dates.copy(),
+                                   'date', 
+                                   groupby='id', 
+                                   fill_value=np.nan, 
+                                   range_by_group=True, 
+                                   freq='4H')
+test_eq(expected_output_df, output_df)
+
+
   date                 id  feature1  feature2
0  2021-05-01 08:00:00  0   0.438784  0.084472
1  2021-05-01 12:00:00  0   NaN       NaN
2  2021-05-01 16:00:00  0   0.059613  0.445215
3  2021-05-01 20:00:00  0   0.511807  0.001034
4  2021-05-01 00:00:00  1   0.970115  0.280121
5  2021-05-01 04:00:00  1   0.775051  0.436359
6  2021-05-01 08:00:00  1   NaN       NaN
7  2021-05-01 12:00:00  1   NaN       NaN
8  2021-05-01 16:00:00  1   0.469987  0.457442
+
+
+
+
# groupby='id', range_by_group=False
+expected_output_df = date_df.copy() 
+expected_output_df.loc[[0,1,3,8,9,11], ['feature1', 'feature2']] = np.nan
+display(expected_output_df)
+output_df = add_missing_timestamps(date_df_with_missing_dates.copy(), 
+                                   'date', 
+                                   groupby='id', 
+                                   fill_value=np.nan, 
+                                   range_by_group=False, 
+                                   freq='4H')
+test_eq(expected_output_df, output_df)
+
+
    date                 id  feature1  feature2
0   2021-05-01 00:00:00  0   NaN       NaN
1   2021-05-01 04:00:00  0   NaN       NaN
2   2021-05-01 08:00:00  0   0.438784  0.084472
3   2021-05-01 12:00:00  0   NaN       NaN
4   2021-05-01 16:00:00  0   0.059613  0.445215
5   2021-05-01 20:00:00  0   0.511807  0.001034
6   2021-05-01 00:00:00  1   0.970115  0.280121
7   2021-05-01 04:00:00  1   0.775051  0.436359
8   2021-05-01 08:00:00  1   NaN       NaN
9   2021-05-01 12:00:00  1   NaN       NaN
10  2021-05-01 16:00:00  1   0.469987  0.457442
11  2021-05-01 20:00:00  1   NaN       NaN
+
+
+
+
# No groups, with duplicate dates ==> FAILS
+dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
+data = np.zeros((len(dates), 3))
+data[:, 0] = dates
+data[:, 1] = np.random.rand(len(dates))
+data[:, 2] = np.random.rand(len(dates))
+cols = ['date', 'feature1', 'feature2']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})
+date_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)
+date_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']
+display(date_df_with_missing_dates)
+test_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby=None, fill_value=np.nan, range_by_group=False, freq='4H'), )
+
+
   date                 feature1  feature2
0  2021-05-01 00:00:00  0.755092  0.002068
1  2021-05-01 08:00:00  0.570693  0.087019
2  2021-05-01 16:00:00  0.228869  0.856618
3  2021-05-01 16:00:00  0.349506  0.428253
+
+
+
+
# groupby='id', range_by_group=True, with duplicate dates ==> FAILS
+
+dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
+dates = np.concatenate((dates, dates))
+data = np.zeros((len(dates), 4))
+data[:, 0] = dates
+data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
+data[:, 2] = np.random.rand(len(dates))
+data[:, 3] = np.random.rand(len(dates))
+cols = ['date', 'id', 'feature1', 'feature2']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
+date_df_with_missing_dates = date_df.drop([0,1,8,9,11]).reset_index(drop=True)
+date_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']
+display(date_df_with_missing_dates)
+test_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby='id', fill_value=np.nan, range_by_group=True, freq='4H'), 
+          contains='cannot handle a non-unique multi-index!')
+
+
   date                 id  feature1  feature2
0  2021-05-01 08:00:00  0   0.040345  0.312874
1  2021-05-01 12:00:00  0   0.713424  0.597211
2  2021-05-01 16:00:00  0   0.468382  0.652314
3  2021-05-01 16:00:00  0   0.396691  0.605664
4  2021-05-01 00:00:00  1   0.804646  0.964115
5  2021-05-01 04:00:00  1   0.089925  0.072410
6  2021-05-01 16:00:00  1   0.830786  0.560658
+
+
+
+
# groupby='id', range_by_group=FALSE, with duplicate dates ==> FAILS
+
+dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
+dates = np.concatenate((dates, dates))
+data = np.zeros((len(dates), 4))
+data[:, 0] = dates
+data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
+data[:, 2] = np.random.rand(len(dates))
+data[:, 3] = np.random.rand(len(dates))
+cols = ['date', 'id', 'feature1', 'feature2']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
+date_df_with_missing_dates = date_df.drop([0,1,8,9,11]).reset_index(drop=True)
+date_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']
+display(date_df_with_missing_dates)
+test_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby='id', fill_value=np.nan, range_by_group=False, freq='4H'), 
+          contains='cannot handle a non-unique multi-index!')
+
+
   date                 id  feature1  feature2
0  2021-05-01 08:00:00  0   0.448508  0.953596
1  2021-05-01 12:00:00  0   0.868802  0.526845
2  2021-05-01 16:00:00  0   0.223070  0.304842
3  2021-05-01 16:00:00  0   0.645661  0.270956
4  2021-05-01 00:00:00  1   0.017250  0.787757
5  2021-05-01 04:00:00  1   0.783341  0.608269
6  2021-05-01 16:00:00  1   0.426247  0.926149
+
+
+
+

source

+
+
+

time_encoding

+
+
 time_encoding (series, freq, max_val=None)
+
+

Transforms a pandas series of dtype datetime64 (of any freq) or DatetimeIndex into 2 float arrays

+

Available options: microsecond, millisecond, second, minute, hour, day = day_of_month = dayofmonth, day_of_week = weekday = dayofweek, day_of_year = dayofyear, week = week_of_year = weekofyear, month and year
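The underlying idea is the usual cyclical sin/cos transform: a periodic component is mapped onto a circle so that the end of the cycle stays close to its beginning. A minimal, hedged sketch of that idea (illustrative only, not necessarily the exact library implementation; max_val is assumed to be the period of the chosen frequency):

# Hedged sketch of a cyclical encoding for 'dayofweek' (assumed period max_val=7)
+dayofweek = pd.date_range('2021-05-01', '2021-05-14').dayofweek.values
+max_val = 7
+dow_sin_sketch = np.sin(2 * np.pi * dayofweek / max_val)
+dow_cos_sketch = np.cos(2 * np.pi * dayofweek / max_val)
+

For 2021-05-01 (a Saturday, dayofweek=5) this gives approximately (-0.9749, -0.2225), which matches the dow_sin/dow_cos values shown in the example further down. Two arrays are needed because a single sine (or the raw value) would map the end of a cycle far away from its beginning.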

+
+
for freq in ['microsecond', 'second', 'minute', 'hour', 'day', 'dayofweek', 'dayofyear', 'month']:
+    tdf = pd.DataFrame(pd.date_range('2021-03-01', dt.datetime.today()), columns=['date'])
+    a,b = time_encoding(tdf.date, freq=freq)
+    plt.plot(a)
+    plt.plot(b)
+    plt.title(freq)
+    plt.show()
+
+
[8 figures: sin/cos time_encoding curves, one per frequency in the loop above (microsecond, second, minute, hour, day, dayofweek, dayofyear, month)]
for freq in ['microsecond', 'second', 'minute', 'hour', 'day', 'dayofweek', 'dayofyear', 'month']:
+    dateindex = pd.date_range('2021-03-01', dt.datetime.today())
+    a,b = time_encoding(dateindex, freq=freq)
+    plt.plot(a)
+    plt.plot(b)
+    plt.title(freq)
+    plt.show()
+
+
+
[8 figures: sin/cos time_encoding curves for the same frequencies, computed from a DatetimeIndex]
dow_sin, dow_cos = time_encoding(date_df['date'], 'dayofweek')
+plt.plot(dow_sin)
+plt.plot(dow_cos)
+plt.title('DayOfWeek')
+plt.show()
+date_df['dow_sin'] = dow_sin
+date_df['dow_cos'] = dow_cos
+date_df
+
+
[figure: DayOfWeek sin/cos encoding]
    date                 id  feature1  feature2  dow_sin    dow_cos
0   2021-05-01 00:00:00  0   0.773597  0.465634  -0.974928  -0.222521
1   2021-05-01 04:00:00  0   0.265526  0.963753  -0.974928  -0.222521
2   2021-05-01 08:00:00  0   0.448508  0.953596  -0.974928  -0.222521
3   2021-05-01 12:00:00  0   0.868802  0.526845  -0.974928  -0.222521
4   2021-05-01 16:00:00  0   0.223070  0.304842  -0.974928  -0.222521
5   2021-05-01 20:00:00  0   0.645661  0.270956  -0.974928  -0.222521
6   2021-05-01 00:00:00  1   0.017250  0.787757  -0.974928  -0.222521
7   2021-05-01 04:00:00  1   0.783341  0.608269  -0.974928  -0.222521
8   2021-05-01 08:00:00  1   0.629875  0.170726  -0.974928  -0.222521
9   2021-05-01 12:00:00  1   0.302927  0.682136  -0.974928  -0.222521
10  2021-05-01 16:00:00  1   0.426247  0.926149  -0.974928  -0.222521
11  2021-05-01 20:00:00  1   0.830624  0.543715  -0.974928  -0.222521
+
+
+
+

source

+
+
+

get_gaps

+
+
 get_gaps (o:torch.Tensor, forward:bool=True, backward:bool=True,
+           nearest:bool=True, normalize:bool=True)
+
+

Number of sequence steps from previous, to next and/or to nearest real value along the last dimension of 3D arrays or tensors

+
+

source

+
+
+

nearest_gaps

+
+
 nearest_gaps (o, normalize=True)
+
+

Number of sequence steps to nearest real value along the last dimension of 3D arrays or tensors

+
+

source

+
+
+

backward_gaps

+
+
 backward_gaps (o, normalize=True)
+
+

Number of sequence steps to next real value along the last dimension of 3D arrays or tensors

+
+

source

+
+
+

forward_gaps

+
+
 forward_gaps (o, normalize=True)
+
+

Number of sequence steps since previous real value along the last dimension of 3D arrays or tensors
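As a rough illustration of what a forward gap means, here is a hand-rolled sketch with its own simple convention (not the library code, whose exact counting and normalize=True behaviour may differ):

# Hedged sketch: steps since the last real (non-nan) value along the last axis
+def simple_forward_gaps(o):
+    gaps = np.zeros_like(o, dtype=float)
+    counter = np.zeros(o.shape[:-1])
+    for i in range(o.shape[-1]):
+        counter = np.where(np.isnan(o[..., i]), counter + 1, 0)  # reset the count on real values (sketch convention)
+        gaps[..., i] = counter
+    return gaps
+
+print(simple_forward_gaps(np.array([[[np.nan, 3., np.nan, np.nan, 7.]]])))  # [[[1. 0. 1. 2. 0.]]]
+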

+
+
t = torch.rand(1, 2, 8)
+arr = t.numpy()
+t[t <.6] = np.nan
+test_ge(nearest_gaps(t).min().item(), 0)
+test_ge(nearest_gaps(arr).min(), 0)
+test_le(nearest_gaps(t).min().item(), 1)
+test_le(nearest_gaps(arr).min(), 1)
+test_eq(torch.isnan(forward_gaps(t)).sum(), 0)
+test_eq(np.isnan(forward_gaps(arr)).sum(), 0)
+ag = get_gaps(t)
+test_eq(ag.shape, (1,6,8))
+test_eq(torch.isnan(ag).sum(), 0)
+
+
+

source

+
+
+

add_delta_timestamp_cols

+
+
 add_delta_timestamp_cols (df, cols=None, groupby=None, forward=True,
+                           backward=True, nearest=True, normalize=True)
+
+
+
# Add delta timestamp features for the no groups setting
+dates = pd.date_range('2021-05-01', '2021-05-07').values
+data = np.zeros((len(dates), 2))
+data[:, 0] = dates
+data[:, 1] = np.random.rand(len(dates))
+
+cols = ['date', 'feature1']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float})
+date_df.loc[[1,3,4],'feature1'] = np.nan
+date_df
+
+
   date        feature1
0  2021-05-01  0.132532
1  2021-05-02  NaN
2  2021-05-03  0.403176
3  2021-05-04  NaN
4  2021-05-05  NaN
5  2021-05-06  0.179554
6  2021-05-07  0.446536
+
+
+
+
# No groups
+expected_output_df = date_df.copy()
+expected_output_df['feature1_dt_fwd'] = np.array([1,1,2,1,2,3,1])
+expected_output_df['feature1_dt_bwd'] = np.array([2,1,3,2,1,1,1])
+expected_output_df['feature1_dt_nearest'] = np.array([1,1,2,1,1,1,1])
+
+display(expected_output_df)
+output_df = add_delta_timestamp_cols(date_df, cols='feature1', normalize=False)
+test_eq(expected_output_df, output_df)
+
+
   date        feature1  feature1_dt_fwd  feature1_dt_bwd  feature1_dt_nearest
0  2021-05-01  0.132532  1                2                1
1  2021-05-02  NaN       1                1                1
2  2021-05-03  0.403176  2                3                2
3  2021-05-04  NaN       1                2                1
4  2021-05-05  NaN       2                1                1
5  2021-05-06  0.179554  3                1                1
6  2021-05-07  0.446536  1                1                1
+
+
+
+
# Add delta timestamp features within a group
+dates = pd.date_range('2021-05-01', '2021-05-07').values
+dates = np.concatenate((dates, dates))
+data = np.zeros((len(dates), 3))
+data[:, 0] = dates
+data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
+data[:, 2] = np.random.rand(len(dates))
+
+cols = ['date', 'id', 'feature1']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float})
+date_df.loc[[1,3,4,8,9,11],'feature1'] = np.nan
+date_df
+
+
    date        id  feature1
0   2021-05-01  0   0.405327
1   2021-05-02  0   NaN
2   2021-05-03  0   0.055934
3   2021-05-04  0   NaN
4   2021-05-05  0   NaN
5   2021-05-06  0   0.698408
6   2021-05-07  0   0.064831
7   2021-05-01  1   0.407541
8   2021-05-02  1   NaN
9   2021-05-03  1   NaN
10  2021-05-04  1   0.113590
11  2021-05-05  1   NaN
12  2021-05-06  1   0.548088
13  2021-05-07  1   0.348813
+
+
+
+
# groupby='id'
+expected_output_df = date_df.copy()
+expected_output_df['feature1_dt_fwd'] = np.array([1,1,2,1,2,3,1,1,1,2,3,1,2,1])
+expected_output_df['feature1_dt_bwd'] = np.array([2,1,3,2,1,1,1,3,2,1,2,1,1,1])
+expected_output_df['feature1_dt_nearest'] = np.array([1,1,2,1,1,1,1,1,1,1,2,1,1,1])
+
+display(expected_output_df)
+output_df = add_delta_timestamp_cols(date_df, cols='feature1', groupby='id', normalize=False)
+test_eq(expected_output_df, output_df)
+
+
    date        id  feature1  feature1_dt_fwd  feature1_dt_bwd  feature1_dt_nearest
0   2021-05-01  0   0.405327  1                2                1
1   2021-05-02  0   NaN       1                1                1
2   2021-05-03  0   0.055934  2                3                2
3   2021-05-04  0   NaN       1                2                1
4   2021-05-05  0   NaN       2                1                1
5   2021-05-06  0   0.698408  3                1                1
6   2021-05-07  0   0.064831  1                1                1
7   2021-05-01  1   0.407541  1                3                1
8   2021-05-02  1   NaN       1                2                1
9   2021-05-03  1   NaN       2                1                1
10  2021-05-04  1   0.113590  3                2                2
11  2021-05-05  1   NaN       1                1                1
12  2021-05-06  1   0.548088  2                1                1
13  2021-05-07  1   0.348813  1                1                1
+
+
+

SlidingWindow and SlidingWindowPanel are two functions that create arrays of subsequences (windows) from a numpy array, torch tensor or pandas dataframe, based on multiple criteria; a minimal sketch of the idea follows.
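The sketch below is a hedged conceptual illustration of window_len and stride (simplified: 1d input, no padding, no targets), not the SlidingWindow implementation:

# Hedged conceptual sketch, not the SlidingWindow implementation
+def naive_sliding_window(seq, window_len, stride):
+    return np.stack([seq[i:i + window_len] for i in range(0, len(seq) - window_len + 1, stride)])
+
+naive_sliding_window(np.arange(12), window_len=5, stride=2)
+# array([[ 0,  1,  2,  3,  4],
+#        [ 2,  3,  4,  5,  6],
+#        [ 4,  5,  6,  7,  8],
+#        [ 6,  7,  8,  9, 10]])
+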

+
+

source

+
+
+

SlidingWindow

+
+
 SlidingWindow (window_len:int, stride:Optional[int]=1, start:int=0,
+                pad_remainder:bool=False, padding:str='post',
+                padding_value:float=nan, add_padding_feature:bool=True,
+                get_x:Union[NoneType,int,list]=None,
+                get_y:Union[NoneType,int,list]=None,
+                y_func:Optional[callable]=None,
+                output_processor:Optional[callable]=None, copy:bool=False,
+                horizon:Union[int,list]=1, seq_first:bool=True,
+                sort_by:Optional[list]=None, ascending:bool=True,
+                check_leakage:bool=True)
+
+

Applies a sliding window to a 1d or 2d input (np.ndarray, torch.Tensor or pd.DataFrame)

+

Input:

+
    You can use np.ndarray, pd.DataFrame or torch.Tensor as input
+
+    shape: (seq_len, ) or (seq_len, n_vars) if seq_first=True else (n_vars, seq_len)
                     Type                     Default  Details
window_len           int                               length of lookback window
stride               Union[None, int]         1        n datapoints the window is moved ahead along the sequence. Default: 1. If None, stride=window_len (no overlap)
start                int                      0        determines the step where the first window is applied: 0 (default) or a given step (int). Previous steps will be discarded.
pad_remainder        bool                     False    allows to pad remainder subsequences when the sliding window is applied and get_y == [] (unlabeled data).
padding              str                      post     'pre' or 'post' (optional, defaults to 'pre'): pad either before or after each sequence. If pad_remainder == False, it indicates the starting point to create the sequence ('pre' from the end, and 'post' from the beginning)
padding_value        float                    nan      value (float) that will be used for padding. Default: np.nan
add_padding_feature  bool                     True     add an additional feature indicating whether each timestep is padded (1) or not (0).
get_x                Union[None, int, list]   None     indices of columns that contain the independent variable (xs). If None, all data will be used as x.
get_y                Union[None, int, list]   None     indices of columns that contain the target (ys). If None, all data will be used as y. [] means no y data is created (unlabeled data).
y_func               Optional[callable]       None     optional function to calculate the ys based on the get_y col/s and each y sub-window. y_func must be a function applied to axis=1!
output_processor     Optional[callable]       None     optional function to process the final output (X (and y if available)). This is useful when some values need to be removed. The function should take X and y (even if it's None) as arguments.
copy                 bool                     False    copy the original object to avoid changes in it.
horizon              Union[int, list]         1        number of future datapoints to predict (y). If get_y is [] horizon will be set to 0.
seq_first            bool                     True     True if input shape (seq_len, n_vars), False if input shape (n_vars, seq_len)
sort_by              Optional[list]           None     column/s used for sorting the array in ascending order
ascending            bool                     True     used in sorting
check_leakage        bool                     True     checks if there's leakage in the output between X and y
+
+
wl = 5
+stride = 5
+
+t = np.repeat(np.arange(13).reshape(-1,1), 3, axis=-1)
+print('input shape:', t.shape)
+X, y = SlidingWindow(wl, stride=stride, pad_remainder=True, get_y=[])(t)
+X
+
+
input shape: (13, 3)
+
+
+
array([[[ 0.,  1.,  2.,  3.,  4.],
+        [ 0.,  1.,  2.,  3.,  4.],
+        [ 0.,  1.,  2.,  3.,  4.],
+        [ 0.,  0.,  0.,  0.,  0.]],
+
+       [[ 5.,  6.,  7.,  8.,  9.],
+        [ 5.,  6.,  7.,  8.,  9.],
+        [ 5.,  6.,  7.,  8.,  9.],
+        [ 0.,  0.,  0.,  0.,  0.]],
+
+       [[10., 11., 12., nan, nan],
+        [10., 11., 12., nan, nan],
+        [10., 11., 12., nan, nan],
+        [ 0.,  0.,  0.,  1.,  1.]]])
+
+
+
+
wl = 5
+t = np.arange(10)
+print('input shape:', t.shape)
+X, y = SlidingWindow(wl)(t)
+test_eq(X.shape[1:], (1, wl))
+itemify(X,)
+
+
input shape: (10,)
+
+
+
(#5) [(array([[0, 1, 2, 3, 4]]),),(array([[1, 2, 3, 4, 5]]),),(array([[2, 3, 4, 5, 6]]),),(array([[3, 4, 5, 6, 7]]),),(array([[4, 5, 6, 7, 8]]),)]
+
+
+
+
wl = 5
+h = 1
+
+t = np.arange(10)
+print('input shape:', t.shape)
+X, y = SlidingWindow(wl, stride=1, horizon=h)(t)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+test_eq(items[0][1].shape, ())
+
+
input shape: (10,)
+[(array([[0, 1, 2, 3, 4]]), 5), (array([[1, 2, 3, 4, 5]]), 6), (array([[2, 3, 4, 5, 6]]), 7), (array([[3, 4, 5, 6, 7]]), 8), (array([[4, 5, 6, 7, 8]]), 9)]
+
+
+
+
wl = 5
+h = 2 # 2 or more
+
+t = np.arange(10)
+print('input shape:', t.shape)
+X, y = SlidingWindow(wl, horizon=h)(t)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+test_eq(items[0][1].shape, (2, ))
+
+
input shape: (10,)
+[(array([[0, 1, 2, 3, 4]]), array([5, 6])), (array([[1, 2, 3, 4, 5]]), array([6, 7])), (array([[2, 3, 4, 5, 6]]), array([7, 8])), (array([[3, 4, 5, 6, 7]]), array([8, 9]))]
+
+
+
+
wl = 5
+h = 2 # 2 or more
+
+t = np.arange(10).reshape(1, -1)
+print('input shape:', t.shape)
+X, y = SlidingWindow(wl, stride=1, horizon=h, get_y=None, seq_first=False)(t)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+test_eq(items[0][1].shape, (2, ))
+
+
input shape: (1, 10)
+[(array([[0, 1, 2, 3, 4]]), array([5, 6])), (array([[1, 2, 3, 4, 5]]), array([6, 7])), (array([[2, 3, 4, 5, 6]]), array([7, 8])), (array([[3, 4, 5, 6, 7]]), array([8, 9]))]
+
+
+
+
wl = 5
+h = 2 # 2 or more
+
+t = np.arange(10).reshape(1, -1)
+print('input shape:', t.shape)
+X, y = SlidingWindow(wl, stride=1, horizon=h, seq_first=False)(t)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+
+
input shape: (1, 10)
+[(array([[0, 1, 2, 3, 4]]), array([5, 6])), (array([[1, 2, 3, 4, 5]]), array([6, 7])), (array([[2, 3, 4, 5, 6]]), array([7, 8])), (array([[3, 4, 5, 6, 7]]), array([8, 9]))]
+
+
+
+
wl = 5
+
+t = np.arange(10).reshape(1, -1)
+print('input shape:', t.shape)
+X, y = SlidingWindow(wl, stride=3, horizon=1, get_y=None, seq_first=False)(t)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+test_eq(items[0][1].shape, ())
+
+
input shape: (1, 10)
+[(array([[0, 1, 2, 3, 4]]), 5), (array([[3, 4, 5, 6, 7]]), 8)]
+
+
+
+
wl = 5
+start = 3
+
+t = np.arange(20)
+print('input shape:', t.shape)
+X, y = SlidingWindow(wl, stride=None, horizon=1, start=start)(t)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+test_eq(items[0][1].shape, ())
+
+
input shape: (20,)
+[(array([[3, 4, 5, 6, 7]]), 8), (array([[ 8,  9, 10, 11, 12]]), 13), (array([[13, 14, 15, 16, 17]]), 18)]
+
+
+
+
wl = 5
+
+t = np.arange(20)
+print('input shape:', t.shape)
+df = pd.DataFrame(t, columns=['var'])
+display(df)
+X, y = SlidingWindow(wl, stride=None, horizon=1, get_y=None)(df)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+test_eq(items[0][1].shape, ())
+
+
input shape: (20,)
+[(array([[0, 1, 2, 3, 4]]), 5), (array([[5, 6, 7, 8, 9]]), 10), (array([[10, 11, 12, 13, 14]]), 15)]
+
+
+
    var
0   0
1   1
2   2
3   3
4   4
5   5
6   6
7   7
8   8
9   9
10  10
11  11
12  12
13  13
14  14
15  15
16  16
17  17
18  18
19  19
+
+
+
+
wl = 5
+
+t = np.arange(20)
+print('input shape:', t.shape)
+df = pd.DataFrame(t, columns=['var'])
+display(df)
+X, y = SlidingWindow(wl, stride=1, horizon=1, get_y=None)(df)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+test_eq(items[0][1].shape, ())
+
+
input shape: (20,)
+[(array([[0, 1, 2, 3, 4]]), 5), (array([[1, 2, 3, 4, 5]]), 6), (array([[2, 3, 4, 5, 6]]), 7), (array([[3, 4, 5, 6, 7]]), 8), (array([[4, 5, 6, 7, 8]]), 9), (array([[5, 6, 7, 8, 9]]), 10), (array([[ 6,  7,  8,  9, 10]]), 11), (array([[ 7,  8,  9, 10, 11]]), 12), (array([[ 8,  9, 10, 11, 12]]), 13), (array([[ 9, 10, 11, 12, 13]]), 14), (array([[10, 11, 12, 13, 14]]), 15), (array([[11, 12, 13, 14, 15]]), 16), (array([[12, 13, 14, 15, 16]]), 17), (array([[13, 14, 15, 16, 17]]), 18), (array([[14, 15, 16, 17, 18]]), 19)]
+
+
+
    var
0   0
1   1
2   2
3   3
4   4
5   5
6   6
7   7
8   8
9   9
10  10
11  11
12  12
13  13
14  14
15  15
16  16
17  17
18  18
19  19
+
+
+
+
wl = 5
+
+t = np.arange(20)
+print('input shape:', t.shape)
+df = pd.DataFrame(t, columns=['var']).T
+display(df)
+X, y = SlidingWindow(wl, stride=None, horizon=1, get_y=None, seq_first=False)(df)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (1, wl))
+test_eq(items[0][1].shape, ())
+
+
input shape: (20,)
+[(array([[0, 1, 2, 3, 4]]), 5), (array([[5, 6, 7, 8, 9]]), 10), (array([[10, 11, 12, 13, 14]]), 15)]
+
+
+
      0  1  2  3  4  5  6  7  8  9  10  11  12  13  14  15  16  17  18  19
var   0  1  2  3  4  5  6  7  8  9  10  11  12  13  14  15  16  17  18  19
+
+
+
+
wl = 5
+n_vars = 3
+
+t = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))
+print('input shape:', t.shape)
+df = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])
+display(df)
+X, y = SlidingWindow(wl, horizon=1)(df)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (n_vars, wl))
+
+
input shape: torch.Size([10, 3])
+[(array([[  0,   1,   2,   3,   4],
+       [  0,  10,  20,  30,  40],
+       [  0, 100, 200, 300, 400]]), array([  5,  50, 500])), (array([[  1,   2,   3,   4,   5],
+       [ 10,  20,  30,  40,  50],
+       [100, 200, 300, 400, 500]]), array([  6,  60, 600])), (array([[  2,   3,   4,   5,   6],
+       [ 20,  30,  40,  50,  60],
+       [200, 300, 400, 500, 600]]), array([  7,  70, 700])), (array([[  3,   4,   5,   6,   7],
+       [ 30,  40,  50,  60,  70],
+       [300, 400, 500, 600, 700]]), array([  8,  80, 800])), (array([[  4,   5,   6,   7,   8],
+       [ 40,  50,  60,  70,  80],
+       [400, 500, 600, 700, 800]]), array([  9,  90, 900]))]
+
+
+
   var_0  var_1  var_2
0  0      0      0
1  1      10     100
2  2      20     200
3  3      30     300
4  4      40     400
5  5      50     500
6  6      60     600
7  7      70     700
8  8      80     800
9  9      90     900
+
+
+
+
wl = 5
+n_vars = 3
+
+t = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))
+print('input shape:', t.shape)
+df = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])
+display(df)
+X, y = SlidingWindow(wl, horizon=1, get_y="var_0")(df)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (n_vars, wl))
+
+
input shape: torch.Size([10, 3])
+[(array([[  0,   1,   2,   3,   4],
+       [  0,  10,  20,  30,  40],
+       [  0, 100, 200, 300, 400]]), 5), (array([[  1,   2,   3,   4,   5],
+       [ 10,  20,  30,  40,  50],
+       [100, 200, 300, 400, 500]]), 6), (array([[  2,   3,   4,   5,   6],
+       [ 20,  30,  40,  50,  60],
+       [200, 300, 400, 500, 600]]), 7), (array([[  3,   4,   5,   6,   7],
+       [ 30,  40,  50,  60,  70],
+       [300, 400, 500, 600, 700]]), 8), (array([[  4,   5,   6,   7,   8],
+       [ 40,  50,  60,  70,  80],
+       [400, 500, 600, 700, 800]]), 9)]
+
+
+
   var_0  var_1  var_2
0  0      0      0
1  1      10     100
2  2      20     200
3  3      30     300
4  4      40     400
5  5      50     500
6  6      60     600
7  7      70     700
8  8      80     800
9  9      90     900
+
+
+
+
wl = 5
+n_vars = 3
+
+t = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))
+print('input shape:', t.shape)
+columns=[f'var_{i}' for i in range(n_vars-1)]+['target']
+df = pd.DataFrame(t, columns=columns)
+display(df)
+X, y = SlidingWindow(wl, horizon=1, get_x=columns[:-1], get_y='target')(df)
+items = itemify(X, y)
+print(items)
+test_eq(items[0][0].shape, (n_vars-1, wl))
+test_eq(items[0][1].shape, ())
+
+
input shape: torch.Size([10, 3])
+[(array([[ 0,  1,  2,  3,  4],
+       [ 0, 10, 20, 30, 40]]), 500), (array([[ 1,  2,  3,  4,  5],
+       [10, 20, 30, 40, 50]]), 600), (array([[ 2,  3,  4,  5,  6],
+       [20, 30, 40, 50, 60]]), 700), (array([[ 3,  4,  5,  6,  7],
+       [30, 40, 50, 60, 70]]), 800), (array([[ 4,  5,  6,  7,  8],
+       [40, 50, 60, 70, 80]]), 900)]
+
+
+
   var_0  var_1  target
0  0      0      0
1  1      10     100
2  2      20     200
3  3      30     300
4  4      40     400
5  5      50     500
6  6      60     600
7  7      70     700
8  8      80     800
9  9      90     900
+
+
+
+
n_vars = 3
+
+t = (np.random.rand(1000, n_vars) - .5).cumsum(0)
+print(t.shape)
+plt.plot(t)
+plt.show()
+X, y = SlidingWindow(5, stride=None, horizon=0, get_x=[0,1], get_y=2)(t)
+test_eq(X[0].shape, (n_vars-1, wl))
+test_eq(y[0].shape, ())
+print(X.shape, y.shape)
+
+
(1000, 3)
+(200, 2, 5) (200,)
+
+
+
wl = 5
+n_vars = 3
+
+t = (np.random.rand(100, n_vars) - .5).cumsum(0)
+print(t.shape)
+columns=[f'var_{i}' for i in range(n_vars-1)]+['target']
+df = pd.DataFrame(t, columns=columns)
+display(df)
+X, y = SlidingWindow(5, horizon=0, get_x=columns[:-1], get_y='target')(df)
+test_eq(X[0].shape, (n_vars-1, wl))
+test_eq(y[0].shape, ())
+print(X.shape, y.shape)
+
+
(100, 3)
+(96, 2, 5) (96,)
+
+
+
    var_0      var_1      target
0   0.154072   0.197194   -0.083179
1   0.402744   -0.248788  -0.560573
2   0.448209   0.224215   -0.681264
3   0.631502   0.406760   -1.162043
4   1.099973   0.179926   -0.712690
..  ...        ...        ...
95  -0.405079  3.662311   -2.779159
96  -0.445625  3.488809   -2.663381
97  -0.187349  3.304898   -2.695971
98  -0.100652  3.505663   -2.590652
99  0.371272   3.279901   -2.764369

100 rows × 3 columns

+
+
+
+
+
seq_len = 100
+n_vars = 5
+t = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)
+print(t.shape)
+columns=[f'var_{i}' for i in range(n_vars-1)]+['target']
+df = pd.DataFrame(t, columns=columns)
+display(df)
+X, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=True)(df)
+test_eq(X[0].shape, (n_vars-1, wl))
+test_eq(y[0].shape, ())
+print(X.shape, y.shape)
+
+
(100, 5)
+(96, 4, 5) (96,)
+
+
+
    var_0      var_1      var_2      var_3     target
0   0.443639   -0.288128  -0.049732  0.288915  0.325872
1   -0.047608  -0.009738  0.056768   0.541395  0.017496
2   -0.243972  0.102227   0.361387   0.628397  0.049012
3   -0.721266  0.045104   0.724062   0.940693  0.510875
4   -0.641269  0.141927   0.793837   1.158903  0.417040
..  ...        ...        ...        ...       ...
95  3.488117   2.345512   0.745483   0.258568  2.468550
96  3.187006   1.945844   0.833228   0.511198  2.115330
97  3.019862   1.739802   0.488732   0.881324  2.387837
98  3.314247   1.992000   0.119230   0.797794  2.327720
99  3.394578   2.012458   0.003244   0.387125  2.345970

100 rows × 5 columns

+
+
+
+
+
seq_len = 100
+n_vars = 5
+
+t = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)
+print(t.shape)
+columns=[f'var_{i}' for i in range(n_vars-1)] + ['target']
+df = pd.DataFrame(t, columns=columns).T
+display(df)
+X, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=False)(df)
+test_eq(X[0].shape, (n_vars-1, wl))
+test_eq(y[0].shape, ())
+print(X.shape, y.shape)
+
+
(100, 5)
+(96, 4, 5) (96,)
+
+
+
(transposed dataframe; columns 0-9 and 90-99 shown)
var_0   -0.407162  -0.742169  -1.193053  -1.058644  -0.721243  -1.056788  -1.316226  -1.247859  -1.391482  -1.258618  ...  -2.847911  -3.118643  -3.444248  -3.036050  -2.664068  -2.473782  -2.508080  -2.878210  -2.841170  -2.688932
var_1    0.111643  -0.286318  -0.221917  -0.026094  -0.332200  -0.376518  -0.144763   0.225361   0.487134   0.435856  ...   1.569158   1.294548   1.564455   1.501243   1.490928   1.450602   1.440730   1.755607   1.380986   1.236284
var_2   -0.126951  -0.484267  -0.480375  -0.706987  -0.571379  -0.561959  -0.717696  -0.586035  -0.298053  -0.047405  ...  -1.748096  -1.508691  -1.158258  -1.116485  -1.153738  -1.575450  -1.875091  -1.613255  -1.274859  -1.592096
var_3   -0.462238  -0.748774  -0.625473  -0.360442  -0.789178  -0.530832  -0.785290  -0.413452   0.083685  -0.110964  ...  -4.873450  -4.382297  -4.531454  -4.087051  -4.087801  -4.391084  -4.262526  -4.650170  -4.465874  -4.535273
target   0.241454   0.084139  -0.012974   0.096328   0.501035   0.697043   0.229185   0.497430   0.552922   0.218345  ...  -4.582426  -4.194067  -3.785398  -3.808516  -3.629740  -3.398645  -3.828007  -3.600028  -3.614195  -3.592783

5 rows × 100 columns

+
+
+
+
+
seq_len = 100
+n_vars = 5
+t = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)
+print(t.shape)
+columns=[f'var_{i}' for i in range(n_vars-1)] + ['target']
+df = pd.DataFrame(t, columns=columns).T
+display(df)
+X, y = SlidingWindow(5, stride=None, horizon=0, get_x=columns[:-1], get_y='target', seq_first=False)(df)
+test_eq(X[0].shape, (n_vars-1, wl))
+test_eq(y[0].shape, ())
+print(X.shape, y.shape)
+
+
(100, 5)
+(20, 4, 5) (20,)
+
+
+
(transposed dataframe; columns 0-9 and 90-99 shown)
var_0    0.210943  -0.264863  -0.307942   0.176782  -0.188244   0.118824   0.593353   0.611408   0.176396   0.566034  ...  -4.738294  -5.138743  -5.203979  -4.835758  -4.534974  -4.310112  -4.366365  -4.328250  -4.527717  -4.432726
var_1   -0.086375  -0.457413   0.025571   0.428256   0.611573   0.319714  -0.085129   0.161735   0.052730  -0.356617  ...   7.203539   7.300534   7.267954   6.838923   7.054134   6.612532   7.108269   6.966000   7.407915   7.332567
var_2    0.166139  -0.231839  -0.468804  -0.565628  -0.500941  -0.706951  -0.881385  -1.138549  -0.978276  -0.952727  ...   0.391942   0.802356   0.395688   0.033288   0.147283   0.589911   0.360847   0.322019   0.478120   0.278228
var_3   -0.234297  -0.467480  -0.925036  -0.572783  -0.345585   0.149537  -0.078098  -0.577732  -0.771975  -0.322283  ...  -1.487032  -1.971348  -2.300616  -2.767312  -2.657974  -2.880908  -2.567235  -2.758240  -2.605518  -2.166444
target  -0.416187  -0.164800  -0.283554  -0.534897  -0.896808  -0.456572  -0.889556  -1.178456  -0.877891  -1.176442  ...  -6.094650  -6.510793  -6.408799  -6.685696  -6.672726  -6.210781  -6.377436  -5.974001  -5.755187  -5.608240

5 rows × 100 columns

+
+
+
+
+
from tsai.data.validation import TrainValidTestSplitter
+
+
+
seq_len = 100
+n_vars = 5
+t = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)
+print(t.shape)
+columns=[f'var_{i}' for i in range(n_vars-1)]+['target']
+df = pd.DataFrame(t, columns=columns)
+display(df)
+X, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=True)(df)
+splits = TrainValidTestSplitter(valid_size=.2, shuffle=False)(y)
+X.shape, y.shape, splits
+
+
(100, 5)
+
+
+
    var_0     var_1      var_2      var_3      target
0   0.123248  -0.081596  0.099444   0.447980   -0.397975
1   0.469671  -0.334499  0.307867   0.141345   -0.131085
2   0.522902  -0.696817  0.386597   0.156818   0.128043
3   0.487025  -0.966153  -0.050574  -0.248479  -0.088962
4   0.396284  -1.319821  -0.113121  -0.379227  0.313690
..  ...       ...        ...        ...        ...
95  6.138836  -1.602917  1.713049   1.421797   -1.873899
96  5.892472  -1.896914  1.401137   1.065859   -2.239942
97  5.421917  -1.728568  1.481270   0.998533   -2.157474
98  5.763120  -1.404330  1.931361   1.295956   -1.934397
99  5.827842  -1.762438  1.831712   1.014259   -1.831573

100 rows × 5 columns

+
+
+
+
((96, 4, 5),
+ (96,),
+ ((#77) [0,1,2,3,4,5,6,7,8,9...], (#19) [77,78,79,80,81,82,83,84,85,86...]))
+
+
+
+
data = np.concatenate([np.linspace(0, 1, 11).reshape(-1,1).repeat(2, 1), np.arange(11).reshape(-1,1)], -1)
+df_test = pd.DataFrame(data, columns=['col1', 'col2', 'target'])
+df_test['target'] = df_test['target'].astype(int)
+df_test
+
+
    col1  col2  target
0   0.0   0.0   0
1   0.1   0.1   1
2   0.2   0.2   2
3   0.3   0.3   3
4   0.4   0.4   4
5   0.5   0.5   5
6   0.6   0.6   6
7   0.7   0.7   7
8   0.8   0.8   8
9   0.9   0.9   9
10  1.0   1.0   10
+
+
+
+
def _y_func(o): return o[:, 0]
+
+
+
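# Note (interpretation of the horizon docs above): horizon=-wl makes the y window span the same wl steps
+# as the x window (past steps -wl+1 to 0); _y_func keeps only the first of those steps, so y equals the
+# 'target' value at the start of each window, which is why x[:, 0, 0]*10 is compared against y below.
+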
for wl in np.arange(1, 20):
+    x, y = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 'col2'], get_y=['target'], horizon=-wl, y_func=_y_func)(df_test)
+    test_eq(x.shape[0], math.ceil((len(df_test))/wl))
+    test_eq(x.shape[0], y.shape[0])
+    test_eq(x.shape[2], wl)
+    test_close(x[:, 0, 0]*10, y)
+
+
+
for wl in np.arange(1, 20):
+    x, y = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 'col2'], get_y=['target'], horizon=-wl, y_func=None)(df_test)
+    test_eq(x.shape[0], math.ceil((len(df_test))/ wl))
+    test_eq(x.shape[0], y.shape[0])
+    test_eq(x.shape[2], wl)
+
+
+
for wl in np.arange(1, len(df_test)+1):
+    x, y = SlidingWindow(wl, None, pad_remainder=False, get_x=['col1', 'col2'], get_y=['target'], horizon=-wl, y_func=None)(df_test)
+    test_eq(x.shape[0], len(df_test) // wl)
+    test_eq(x.shape[0], y.shape[0])
+    test_eq(x.shape[2], wl)
+
+
+
for wl in np.arange(1, 20):
+    x, _ = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 'col2'], get_y=[], horizon=0)(df_test)
+    test_eq(x.shape[0], math.ceil((len(df_test))/wl))
+    test_eq(x.shape[2], wl)
+
+
+
for wl in np.arange(2, len(df_test)):
+    x, _ = SlidingWindow(wl, wl, pad_remainder=False, get_x=['col1', 'col2'], get_y=[], horizon=0)(df_test)
+    test_eq(x.shape[0], len(df_test) // wl)
+    test_eq(x.shape[2], wl)
+
+
+
df = pd.DataFrame()
+df['sample_id'] = np.concatenate([np.ones(n)*(i + 1) for i,n in enumerate([13])])
+df['var1'] = df['sample_id'] + df.index.values - 1
+df['var2'] = df['var1'] * 10
+df['target'] = (df['var1']).astype(int)
+df['sample_id'] = df['sample_id'].astype(int)
+df
+
+
    sample_id  var1  var2   target
0   1          0.0   0.0    0
1   1          1.0   10.0   1
2   1          2.0   20.0   2
3   1          3.0   30.0   3
4   1          4.0   40.0   4
5   1          5.0   50.0   5
6   1          6.0   60.0   6
7   1          7.0   70.0   7
8   1          8.0   80.0   8
9   1          9.0   90.0   9
10  1          10.0  100.0  10
11  1          11.0  110.0  11
12  1          12.0  120.0  12
+
+
+
+
X, y = SlidingWindow(window_len=3, stride=2, start=3, pad_remainder=False, padding="pre", padding_value=np.nan, add_padding_feature=False,
+                     get_x=["var1", "var2"], get_y=["target"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,
+                     ascending=True, check_leakage=True)(df)
+test_eq(X.shape, (2, 2, 3))
+test_eq(y.shape, (2, 4))
+X, y
+
+
(array([[[ 4.,  5.,  6.],
+         [40., 50., 60.]],
+ 
+        [[ 6.,  7.,  8.],
+         [60., 70., 80.]]]),
+ array([[ 7,  8,  9, 10],
+        [ 9, 10, 11, 12]]))
+
+
+
+
X, y = SlidingWindow(window_len=3, stride=2, start=3, pad_remainder=True, padding="pre", padding_value=np.nan, add_padding_feature=False,
+                     get_x=["var1", "var2"], get_y=["target"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,
+                     ascending=True, check_leakage=True)(df)
+test_eq(X.shape, (3, 2, 3))
+test_eq(y.shape, (3, 4))
+X, y
+
+
(array([[[nan,  3.,  4.],
+         [nan, 30., 40.]],
+ 
+        [[ 4.,  5.,  6.],
+         [40., 50., 60.]],
+ 
+        [[ 6.,  7.,  8.],
+         [60., 70., 80.]]]),
+ array([[ 5,  6,  7,  8],
+        [ 7,  8,  9, 10],
+        [ 9, 10, 11, 12]]))
+
+
+
+
X, y = SlidingWindow(window_len=3, stride=2, start=3, pad_remainder=False, padding="post", padding_value=np.nan, add_padding_feature=False,
+                     get_x=["var1", "var2"], get_y=["target"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,
+                     ascending=True, check_leakage=True)(df)
+test_eq(X.shape, (2, 2, 3))
+test_eq(y.shape, (2, 4))
+X, y
+
+
(array([[[ 3.,  4.,  5.],
+         [30., 40., 50.]],
+ 
+        [[ 5.,  6.,  7.],
+         [50., 60., 70.]]]),
+ array([[ 6,  7,  8,  9],
+        [ 8,  9, 10, 11]]))
+
+
+
+
X, y = SlidingWindow(window_len=3, stride=2, start=3, pad_remainder=True, padding="post", padding_value=np.nan, add_padding_feature=False,
+                     get_x=["var1", "var2"], get_y=["target"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,
+                     ascending=True, check_leakage=True)(df)
+test_eq(X.shape, (3, 2, 3))
+test_eq(y.shape, (3, 4))
+X, y
+
+
(array([[[ 3.,  4.,  5.],
+         [30., 40., 50.]],
+ 
+        [[ 5.,  6.,  7.],
+         [50., 60., 70.]],
+ 
+        [[ 7.,  8.,  9.],
+         [70., 80., 90.]]]),
+ array([[ 6.,  7.,  8.,  9.],
+        [ 8.,  9., 10., 11.],
+        [10., 11., 12., nan]]))
+
+
+
+
X, y = SlidingWindow(window_len=10, stride=2, start=3, pad_remainder=True, padding="pre", padding_value=np.nan, add_padding_feature=False,
+                     get_x=["var1", "var2"], get_y=["target"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,
+                     ascending=True, check_leakage=True)(df)
+test_eq(X.shape, (1, 2, 10))
+test_eq(y.shape, (1, 4))
+X, y
+
+
(array([[[nan, nan, nan, nan,  3.,  4.,  5.,  6.,  7.,  8.],
+         [nan, nan, nan, nan, 30., 40., 50., 60., 70., 80.]]]),
+ array([[ 9, 10, 11, 12]]))
+
+
+
+
X, y = SlidingWindow(window_len=10, stride=2, start=3, pad_remainder=True, padding="post", padding_value=np.nan, add_padding_feature=False,
+                     get_x=["var1", "var2"], get_y=["target"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,
+                     ascending=True, check_leakage=True)(df)
+test_eq(X.shape, (1, 2, 10))
+test_eq(y.shape, (1, 4))
+X, y
+
+
(array([[[  3.,   4.,   5.,   6.,   7.,   8.,   9.,  10.,  11.,  12.],
+         [ 30.,  40.,  50.,  60.,  70.,  80.,  90., 100., 110., 120.]]]),
+ array([[nan, nan, nan, nan]]))
+
+
+
+

source

+
+
+

SlidingWindowPanel

+
+
 SlidingWindowPanel (window_len:int, unique_id_cols:list,
+                     stride:Optional[int]=1, start:int=0,
+                     pad_remainder:bool=False, padding:str='post',
+                     padding_value:float=nan,
+                     add_padding_feature:bool=True,
+                     get_x:Union[NoneType,int,list]=None,
+                     get_y:Union[NoneType,int,list]=None,
+                     y_func:Optional[callable]=None,
+                     output_processor:Optional[callable]=None,
+                     copy:bool=False, horizon:Union[int,list]=1,
+                     seq_first:bool=True, sort_by:Optional[list]=None,
+                     ascending:bool=True, check_leakage:bool=True,
+                     return_key:bool=False, verbose:bool=True)
+
+

Applies a sliding window to a pd.DataFrame.

+

Args:
    window_len = length of lookback window
    unique_id_cols = pd.DataFrame columns that will be used to identify a time series for each entity.
    stride = n datapoints the window is moved ahead along the sequence. Default: 1. If None, stride=window_len (no overlap).
    start = determines the step where the first window is applied: 0 (default), a given step (int), or random within the 1st stride (None).
    pad_remainder = allows to pad remainder subsequences when the sliding window is applied and get_y == [] (unlabeled data).
    padding = 'pre' or 'post' (optional, defaults to 'pre'): pad either before or after each sequence. If pad_remainder == False, it indicates the starting point to create the sequence ('pre' from the end, and 'post' from the beginning).
    padding_value = value (float) that will be used for padding. Default: np.nan.
    add_padding_feature = add an additional feature indicating whether each timestep is padded (1) or not (0).
    horizon = number of future datapoints to predict (y). If get_y is [], horizon will be set to 0. * 0 for the last step in each sub-window. * n > 0 for a range of n future steps (1 to n). * n < 0 for a range of n past steps (-n + 1 to 0). * list: for those exact timesteps.
    get_x = indices of columns that contain the independent variable (xs). If None, all data will be used as x.
    get_y = indices of columns that contain the target (ys). If None, all data will be used as y. [] means no y data is created (unlabeled data).
    y_func = function to calculate the ys based on the get_y col/s and each y sub-window. y_func must be a function applied to axis=1!
    output_processor = optional function to filter the output (X (and y if available)). This is useful when some values need to be removed. The function should take X and y (even if it's None) as arguments.
    copy = copy the original object to avoid changes in it.
    seq_first = True if input shape (seq_len, n_vars), False if input shape (n_vars, seq_len).
    sort_by = column/s used for sorting the array in ascending order.
    ascending = used in sorting.
    check_leakage = checks if there's leakage in the output between X and y.
    return_key = when True, the key corresponding to unique_id_cols for each sample is returned.
    verbose = controls verbosity. True or 1 displays a progress bar. 2 or more shows records that cannot be created due to their length.

+

Input: You can use np.ndarray, pd.DataFrame or torch.Tensor as input
shape: (seq_len, ) or (seq_len, n_vars) if seq_first=True else (n_vars, seq_len)

+
+
samples = 100_000
+wl = 5
+n_vars = 10
+
+t = (torch.stack(n_vars * [torch.arange(samples)]).T * tensor([10**i for i in range(n_vars)]))
+df = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])
+df['time'] = np.arange(len(t))
+df['device'] = 0
+df['target'] = np.random.randint(0, 2, len(df))
+df2 = df.copy()
+df3 = df.copy()
+cols = ['var_0', 'var_1', 'var_2', 'device', 'target']
+df2[cols] = df2[cols] + 1
+df3[cols] = df3[cols] + 2
+df2 = df2.loc[:3]
+df['region'] = 'A'
+df2['region'] = 'A'
+df3['region'] = 'B'
+df = pd.concat([df, df2, df3], ignore_index=True)
+df['index'] = np.arange(len(df))
+df = df.sample(frac=1).reset_index(drop=True)
+display(df.head())
+df.shape
+
+
   var_0  var_1   var_2    var_3     var_4      var_5       var_6        var_7         var_8          var_9           time   device  target  region  index
0  86008  860080  8600800  86008000  860080000  8600800000  86008000000  860080000000  8600800000000  86008000000000  86008  0       0       A       86008
1  90003  900012  9000102  90001000  900010000  9000100000  90001000000  900010000000  9000100000000  90001000000000  90001  2       2       B       190005
2  43819  438172  4381702  43817000  438170000  4381700000  43817000000  438170000000  4381700000000  43817000000000  43817  2       3       B       143821
3  80751  807492  8074902  80749000  807490000  8074900000  80749000000  807490000000  8074900000000  80749000000000  80749  2       3       B       180753
4  84917  849152  8491502  84915000  849150000  8491500000  84915000000  849150000000  8491500000000  84915000000000  84915  2       3       B       184919
+
+
+
(200004, 15)
+
+
+
+
X, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'], 
+                          horizon=0, seq_first=True, sort_by=['time'], ascending=True, return_key=False)(df)
+X.shape, y.shape
+
+
processing data...
+...data processed
+concatenating X...
+...X concatenated
+concatenating y...
+...y concatenated
+
+
+
+
((199992, 10, 5), (199992,))
+
+
+
+
X, y, key = SlidingWindowPanel(window_len=5, unique_id_cols=['device'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'], 
+                               horizon=0, seq_first=True, sort_by=['time'], ascending=True, return_key=True)(df)
+X.shape, y.shape, key.shape
+
+
processing data...
+...data processed
+concatenating X...
+...X concatenated
+concatenating y...
+...y concatenated
+
+
+ + +
+
+ +
+
+
((199992, 10, 5), (199992,), (199992,))
+
+
+
+
X, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device', 'region'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'], 
+                          horizon=0, seq_first=True, sort_by=['time'], ascending=True)(df)
+X.shape, y.shape
+
+
processing data...
+...data processed
+concatenating X...
+...X concatenated
+concatenating y...
+...y concatenated
+
+
+ + +
+
+ +
+
+
((199992, 10, 5), (199992,))
+
+
+
+
# y_func must be a function applied to axis=1!
+def y_max(o): return np.max(o, axis=1)
+
+
+
X, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device', 'region'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'], 
+                          y_func=y_max, horizon=5, seq_first=True, sort_by=['time'], ascending=True)(df)
+X.shape, y.shape
+
+
processing data...
+...data processed
+concatenating X...
+...X concatenated
+concatenating y...
+...y concatenated
+
+
+ + +
+
+ +
+ +
+
+
((199982, 10, 5), (199982,))
+
+
+
+
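A hedged, minimal sketch (not part of the original notebook), reusing the df and n_vars defined above: the documented stride=None option makes the windows non-overlapping (stride defaults to window_len), so each entity contributes far fewer samples than with stride=1.

# assumption: same df, n_vars and imports as in the cells above
X, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device'], stride=None, start=0,
                          get_x=df.columns[:n_vars], get_y=['target'],
                          horizon=0, seq_first=True, sort_by=['time'], ascending=True)(df)
X.shape, y.shape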

source

+
+
+

identify_padding

+
+
 identify_padding (float_mask, value=-1)
+
+

Identifies padded subsequences in a mask of type float

+

This function identifies as padded subsequences those where all values == nan from the end of the sequence (last dimension) across all channels, and sets those values to the selected value (default = -1)

+

Args:
mask: boolean or float mask
value: scalar that will be used to identify padded subsequences

+
+
wl = 5
+stride = 5
+
+t = np.repeat(np.arange(13).reshape(-1,1), 3, axis=-1)
+print('input shape:', t.shape)
+X, _ = SlidingWindow(wl, stride=stride, pad_remainder=True, get_y=[])(t)
+X = tensor(X)
+X[0, 1, -2:] = np.nan
+X[1,..., :3] = np.nan
+print(X)
+identify_padding(torch.isnan(X).float())
+
+
input shape: (13, 3)
+tensor([[[ 0.,  1.,  2.,  3.,  4.],
+         [ 0.,  1.,  2., nan, nan],
+         [ 0.,  1.,  2.,  3.,  4.],
+         [ 0.,  0.,  0.,  0.,  0.]],
+
+        [[nan, nan, nan,  8.,  9.],
+         [nan, nan, nan,  8.,  9.],
+         [nan, nan, nan,  8.,  9.],
+         [nan, nan, nan,  0.,  0.]],
+
+        [[10., 11., 12., nan, nan],
+         [10., 11., 12., nan, nan],
+         [10., 11., 12., nan, nan],
+         [ 0.,  0.,  0.,  1.,  1.]]])
+
+
+
tensor([[[0., 0., 0., 0., 0.],
+         [0., 0., 0., 1., 1.],
+         [0., 0., 0., 0., 0.],
+         [0., 0., 0., 0., 0.]],
+
+        [[1., 1., 1., 0., 0.],
+         [1., 1., 1., 0., 0.],
+         [1., 1., 1., 0., 0.],
+         [1., 1., 1., 0., 0.]],
+
+        [[0., 0., 0., 1., 1.],
+         [0., 0., 0., 1., 1.],
+         [0., 0., 0., 1., 1.],
+         [0., 0., 0., 0., 0.]]])
+
+
+
+
+

Forecasting data preparation

+
+

source

+
+

basic_data_preparation_fn

+
+
 basic_data_preparation_fn (df, drop_duplicates=True, datetime_col=None,
+                            use_index=False, keep='last',
+                            add_missing_datetimes=True, freq='1D',
+                            method=None, sort_by=None)
+
df: dataframe to preprocess
drop_duplicates (bool, default True): flag to indicate if rows with duplicate datetime info should be removed
datetime_col (NoneType, default None): str indicating the name of the column/s that contains the datetime info
use_index (bool, default False): flag to indicate if the datetime info is in the index
keep (str, default 'last'): str to indicate what data should be kept in case of duplicate rows
add_missing_datetimes (bool, default True): flag to indicate if missing datetimes should be added
freq (str, default '1D'): str to indicate the frequency used in the datetime info. Used in case missing timestamps exist
method (NoneType, default None): str indicating the method used to fill data for missing timestamps: None, 'bfill', 'ffill'
sort_by (NoneType, default None): str or list of str to indicate how to sort the data. If use_index=True the index will be used to sort the dataframe.
+
+
df_len = 100
+datetime_col = 'datetime' 
+df = pd.DataFrame(np.arange(df_len), columns=['value'])
+df['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df_len, freq='1D')
+df['type'] = 1
+# drop 10 rows at random
+df = df.drop(df.sample(10).index)
+# add 2 duplicated rows
+df = pd.concat([df, df.sample(2)])
+display(df)
+
+new_df = basic_data_preparation_fn(df, drop_duplicates=True, datetime_col=datetime_col, use_index=False, keep='last', 
+                                   add_missing_datetimes=True, freq='1D', method='ffill', sort_by=datetime_col)
+display(new_df)
+
+
    value    datetime  type
0       0  1749-03-31     1
1       1  1749-04-01     1
3       3  1749-04-03     1
4       4  1749-04-04     1
5       5  1749-04-05     1
..    ...         ...   ...
96     96  1749-07-05     1
97     97  1749-07-06     1
99     99  1749-07-08     1
0       0  1749-03-31     1
19     19  1749-04-19     1

92 rows × 3 columns

+
+
+
+
    value    datetime  type
0       0  1749-03-31     1
1       1  1749-04-01     1
2       1  1749-04-02     1
3       3  1749-04-03     1
4       4  1749-04-04     1
..    ...         ...   ...
95     95  1749-07-04     1
96     96  1749-07-05     1
97     97  1749-07-06     1
98     97  1749-07-07     1
99     99  1749-07-08     1

100 rows × 3 columns

+
+
+
+
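A hedged, minimal sketch (not part of the original notebook): the same preparation can also be run with the datetime info stored in the index, via the documented use_index flag (sort_by is then not needed, since the index is used for sorting).

# assumption: df is the dataframe with duplicates created in the cell above
df_idx = df.set_index('datetime')
new_df_idx = basic_data_preparation_fn(df_idx, drop_duplicates=True, use_index=True, keep='last',
                                       add_missing_datetimes=True, freq='1D', method='ffill')
new_df_idx.shape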
+

source

+
+
+

check_safe_conversion

+
+
 check_safe_conversion (o, dtype='float32', cols=None)
+
+

Checks if the conversion to float is safe

+
+
assert check_safe_conversion(-2**11, 'float16') == True
+assert check_safe_conversion(-2**11 - 1, 'float16') == False
+assert check_safe_conversion(2**24, 'float32') == True
+assert check_safe_conversion(2**24+1, 'float32') == False
+assert check_safe_conversion(2**53, 'float64') == True
+assert check_safe_conversion(2**53+1, 'float64') == False
+
+df = pd.DataFrame({'a': [1, 2, 3], 'b': [2**24, 2**24+1, 2**24+2]})
+assert not check_safe_conversion(df, 'float32')
+assert check_safe_conversion(df, 'int32')
+assert check_safe_conversion(df, 'float32', cols='a')
+assert not check_safe_conversion(df, 'float32', cols='b')
+
+
-2147483648 1 3 2147483647
+-2147483648 16777216 16777218 2147483647
+
+
+
/var/folders/42/4hhwknbd5kzcbq48tmy_gbp00000gn/T/ipykernel_30986/657350933.py:39: UserWarning: Unsafe conversion to float32: {'a': True, 'b': False}
+  warnings.warn(f"Unsafe conversion to {dtype}: {dict(zip(cols, checks))}")
+/var/folders/42/4hhwknbd5kzcbq48tmy_gbp00000gn/T/ipykernel_30986/657350933.py:39: UserWarning: Unsafe conversion to float32: {'b': False}
+  warnings.warn(f"Unsafe conversion to {dtype}: {dict(zip(cols, checks))}")
+
+
+
+

source

+
+
+

prepare_forecasting_data

+
+
from tsai.data.validation import get_forecasting_splits
+
+
+
fcst_history = 10 
+fcst_horizon = 5
+stride = 1
+valid_size=0.2
+test_size=0.2
+
+df = pd.DataFrame()
+df['target'] = np.arange(50)
+
+X, y = prepare_forecasting_data(df, fcst_history, fcst_horizon)
+splits = get_forecasting_splits(df, fcst_history, fcst_horizon, valid_size=valid_size, test_size=test_size, stride=stride, show_plot=False)
+assert y[splits[0]][-1][0][-1] == y[splits[1]][0][0][0] - stride
+assert y[splits[1]][-1][0][-1] == y[splits[2]][0][0][0] - stride
+for s,t in zip(splits, ['\ntrain_split:', '\nvalid_split:', '\ntest_split :']):
+    print(t)
+    for xi, yi in zip(X[s], y[s]):
+        print(xi, yi)
+
+

+train_split:
+[[0 1 2 3 4 5 6 7 8 9]] [[10 11 12 13 14]]
+[[ 1  2  3  4  5  6  7  8  9 10]] [[11 12 13 14 15]]
+[[ 2  3  4  5  6  7  8  9 10 11]] [[12 13 14 15 16]]
+[[ 3  4  5  6  7  8  9 10 11 12]] [[13 14 15 16 17]]
+[[ 4  5  6  7  8  9 10 11 12 13]] [[14 15 16 17 18]]
+[[ 5  6  7  8  9 10 11 12 13 14]] [[15 16 17 18 19]]
+[[ 6  7  8  9 10 11 12 13 14 15]] [[16 17 18 19 20]]
+[[ 7  8  9 10 11 12 13 14 15 16]] [[17 18 19 20 21]]
+[[ 8  9 10 11 12 13 14 15 16 17]] [[18 19 20 21 22]]
+[[ 9 10 11 12 13 14 15 16 17 18]] [[19 20 21 22 23]]
+[[10 11 12 13 14 15 16 17 18 19]] [[20 21 22 23 24]]
+[[11 12 13 14 15 16 17 18 19 20]] [[21 22 23 24 25]]
+[[12 13 14 15 16 17 18 19 20 21]] [[22 23 24 25 26]]
+[[13 14 15 16 17 18 19 20 21 22]] [[23 24 25 26 27]]
+[[14 15 16 17 18 19 20 21 22 23]] [[24 25 26 27 28]]
+[[15 16 17 18 19 20 21 22 23 24]] [[25 26 27 28 29]]
+
+valid_split:
+[[20 21 22 23 24 25 26 27 28 29]] [[30 31 32 33 34]]
+[[21 22 23 24 25 26 27 28 29 30]] [[31 32 33 34 35]]
+[[22 23 24 25 26 27 28 29 30 31]] [[32 33 34 35 36]]
+[[23 24 25 26 27 28 29 30 31 32]] [[33 34 35 36 37]]
+[[24 25 26 27 28 29 30 31 32 33]] [[34 35 36 37 38]]
+[[25 26 27 28 29 30 31 32 33 34]] [[35 36 37 38 39]]
+
+test_split :
+[[30 31 32 33 34 35 36 37 38 39]] [[40 41 42 43 44]]
+[[31 32 33 34 35 36 37 38 39 40]] [[41 42 43 44 45]]
+[[32 33 34 35 36 37 38 39 40 41]] [[42 43 44 45 46]]
+[[33 34 35 36 37 38 39 40 41 42]] [[43 44 45 46 47]]
+[[34 35 36 37 38 39 40 41 42 43]] [[44 45 46 47 48]]
+[[35 36 37 38 39 40 41 42 43 44]] [[45 46 47 48 49]]
+
+
+
+
fcst_history = 10 
+fcst_horizon = 5
+stride = 1
+valid_size=0.2
+test_size=0.2
+
+df = pd.DataFrame()
+df['target'] = np.arange(50)
+
+X, y = prepare_forecasting_data(df, fcst_history, fcst_horizon, x_vars=None, y_vars=[])
+splits = get_forecasting_splits(df, fcst_history, fcst_horizon, valid_size=valid_size, test_size=test_size, stride=stride, show_plot=False)
+assert y is None
+
+
+
df_len = 100
+n_values = 3
+datetime_col = 'datetime' 
+df = pd.DataFrame()
+for i in range(n_values):
+    df[f"value_{i}"] = (np.arange(df_len) * 10**i).astype(np.float32)
+display(df)
+
+fcst_history = 10
+fcst_horizon = 5
+x_vars = df.columns
+y_vars = None
+dtype = None
+
+X, y = prepare_forecasting_data(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, x_vars=x_vars, y_vars=y_vars, dtype=dtype)
+test_eq(X.shape, (86, 3, 10))
+test_eq(y.shape, (86, 3, 5))
+test_eq(y[:3, :, 0],  X[:3, :, -1] + np.array([1, 10, 100]).reshape(1, 1, -1))
+print(X[:3].astype(int))
+print(y[:3].astype(int))
+
+
    value_0  value_1  value_2
0       0.0      0.0      0.0
1       1.0     10.0    100.0
2       2.0     20.0    200.0
3       3.0     30.0    300.0
4       4.0     40.0    400.0
..      ...      ...      ...
95     95.0    950.0   9500.0
96     96.0    960.0   9600.0
97     97.0    970.0   9700.0
98     98.0    980.0   9800.0
99     99.0    990.0   9900.0

100 rows × 3 columns

+
+
+
+
[[[   0    1    2    3    4    5    6    7    8    9]
+  [   0   10   20   30   40   50   60   70   80   90]
+  [   0  100  200  300  400  500  600  700  800  900]]
+
+ [[   1    2    3    4    5    6    7    8    9   10]
+  [  10   20   30   40   50   60   70   80   90  100]
+  [ 100  200  300  400  500  600  700  800  900 1000]]
+
+ [[   2    3    4    5    6    7    8    9   10   11]
+  [  20   30   40   50   60   70   80   90  100  110]
+  [ 200  300  400  500  600  700  800  900 1000 1100]]]
+[[[  10   11   12   13   14]
+  [ 100  110  120  130  140]
+  [1000 1100 1200 1300 1400]]
+
+ [[  11   12   13   14   15]
+  [ 110  120  130  140  150]
+  [1100 1200 1300 1400 1500]]
+
+ [[  12   13   14   15   16]
+  [ 120  130  140  150  160]
+  [1200 1300 1400 1500 1600]]]
+
+
+
+
df_len = 100
+n_values = 3
+datetime_col = 'datetime' 
+df = pd.DataFrame()
+for i in range(n_values):
+    df[f"value_{i}"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)
+
+df['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df_len, freq='1D')
+df['type'] = np.random.randint(0, 4, df_len)
+df['target'] = np.arange(df_len)
+display(df)
+
+fcst_history = 10
+fcst_horizon = 5
+x_vars = ['value_0', 'value_1', 'value_2', 'target']
+y_vars = 'target'
+dtype = np.float32
+
+X, y = prepare_forecasting_data(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, x_vars=x_vars, y_vars=y_vars, dtype=dtype)
+test_eq(X.shape, (86, 4, 10))
+test_eq(y.shape, (86, 1, 5))
+print(X[:3].astype(int))
+print(y[:3])
+
+
    value_0  value_1  value_2    datetime  type  target
0       0.0      0.0      0.0  1749-03-31     3       0
1      10.0    100.0   1000.0  1749-04-01     1       1
2      20.0    200.0   2000.0  1749-04-02     1       2
3      30.0    300.0   3000.0  1749-04-03     1       3
4      40.0    400.0   4000.0  1749-04-04     2       4
..      ...      ...      ...         ...   ...     ...
95    950.0   9500.0  95000.0  1749-07-04     0      95
96    960.0   9600.0  96000.0  1749-07-05     0      96
97    970.0   9700.0  97000.0  1749-07-06     3      97
98    980.0   9800.0  98000.0  1749-07-07     2      98
99    990.0   9900.0  99000.0  1749-07-08     1      99

100 rows × 6 columns

+
+
+
+
[[[    0    10    20    30    40    50    60    70    80    90]
+  [    0   100   200   300   400   500   600   700   800   900]
+  [    0  1000  2000  3000  4000  5000  6000  7000  8000  9000]
+  [    0     1     2     3     4     5     6     7     8     9]]
+
+ [[   10    20    30    40    50    60    70    80    90   100]
+  [  100   200   300   400   500   600   700   800   900  1000]
+  [ 1000  2000  3000  4000  5000  6000  7000  8000  9000 10000]
+  [    1     2     3     4     5     6     7     8     9    10]]
+
+ [[   20    30    40    50    60    70    80    90   100   110]
+  [  200   300   400   500   600   700   800   900  1000  1100]
+  [ 2000  3000  4000  5000  6000  7000  8000  9000 10000 11000]
+  [    2     3     4     5     6     7     8     9    10    11]]]
+[[[10. 11. 12. 13. 14.]]
+
+ [[11. 12. 13. 14. 15.]]
+
+ [[12. 13. 14. 15. 16.]]]
+
+
+
+

source

+
+
+

get_today

+
+
 get_today (datetime_format='%Y-%m-%d')
+
+
+
test_eq(get_today(), dt.datetime.today().strftime("%Y-%m-%d"))
+
+
+

source

+
+
+

split_fcst_datetime

+
+
 split_fcst_datetime (fcst_datetime)
+
+

Define fcst start and end dates

fcst_datetime: str or list of str with datetime
+
+
test_eq(split_fcst_datetime(None), (None, None))
+test_eq(split_fcst_datetime('2020-01-01'), ('2020-01-01', '2020-01-01'))
+test_eq(split_fcst_datetime(['2019-01-01', '2020-01-01']), ['2019-01-01', '2020-01-01'])
+
+
+

source

+
+
+

set_df_datetime

+
+
 set_df_datetime (df, datetime_col=None, use_index=False)
+
+

Make sure datetime column or index is of the right date type.

+
+
# Test
+df_len = 100
+n_values = 3
+datetime_col = 'datetime'
+df = pd.DataFrame()
+for i in range(n_values):
+    df[f"value_{i}"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)
+df['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df_len, freq='1D')
+set_df_datetime(df, datetime_col=datetime_col)
+test_eq(df['datetime'].dtypes, np.dtype('datetime64[ns]'))
+df_index = df.set_index('datetime')
+set_df_datetime(df_index, use_index=True)
+test_eq(df_index.index.dtype, np.dtype('datetime64[ns]'))
+
+
+

source

+
+
+

get_df_datetime_bounds

+
+
 get_df_datetime_bounds (df, datetime_col=None, use_index=False)
+
+

Returns the start and end dates used by the forecast

df: dataframe containing forecasting data
datetime_col (NoneType, default None): str data column containing the datetime
use_index (bool, default False): bool flag to indicate if index should be used to get column
+
+
# Test
+df_len = 100
+n_values = 3
+datetime_col = 'datetime'
+df = pd.DataFrame()
+for i in range(n_values):
+    df[f"value_{i}"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)
+df['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df_len, freq='1D')
+test_eq(get_df_datetime_bounds(df, datetime_col=datetime_col), (df['datetime'].min(), df['datetime'].max()))
+df_index = df.set_index('datetime')
+test_eq(get_df_datetime_bounds(df_index, use_index=True), (df_index.index.min(), df_index.index.max()))
+
+
+

source

+
+
+

get_fcst_bounds

+
+
 get_fcst_bounds (df, fcst_datetime, fcst_history=None, fcst_horizon=None,
+                  freq='D', datetime_format='%Y-%m-%d', datetime_col=None,
+                  use_index=False)
+
+

Returns the start and end datetimes used by the forecast

df: dataframe containing forecasting data
fcst_datetime: datetime for which a fcst is created. Optionally a tuple of datetimes if the fcst is created for a range of dates.
fcst_history (NoneType, default None): # steps used as input
fcst_horizon (NoneType, default None): # predicted steps
freq (str, default 'D'): datetime units. May contain letters only or a combination of ints + letters: e.g. "7D"
datetime_format (str, default '%Y-%m-%d'): format used to convert "today"
datetime_col (NoneType, default None): str data column containing the datetime
use_index (bool, default False): bool flag to indicate if index should be used to get column
+
+
from datetime import timedelta
+
+
+
# Test
+df_len = 100
+n_values = 3
+datetime_col = 'datetime'
+df = pd.DataFrame()
+for i in range(n_values):
+    df[f"value_{i}"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)
+freq = "7D"
+today = pd.Timestamp(get_today()).floor(freq)
+df['datetime'] = pd.date_range(None, today, periods=df_len, freq=freq)
+display(df)
+max_dt = pd.Timestamp(df['datetime'].max()).floor(freq)
+fcst_history = 30
+fcst_horizon = 10
+fcst_datetime = max_dt - timedelta(weeks=fcst_horizon)
+print('fcst_datetime :', fcst_datetime)
+start_datetime, end_datetime = get_fcst_bounds(df, fcst_datetime, datetime_col=datetime_col, fcst_history=fcst_history, fcst_horizon=fcst_horizon, freq=freq)
+print('start_datetime:', start_datetime)
+print('end_datetime  :', end_datetime)
+dates = pd.date_range(start_datetime, end_datetime, freq=freq)
+print(dates)
+test_eq(len(dates), fcst_history + fcst_horizon)
+test_eq(end_datetime, max_dt)
+
+
    value_0  value_1  value_2    datetime
0       0.0      0.0      0.0  2021-11-25
1      10.0    100.0   1000.0  2021-12-02
2      20.0    200.0   2000.0  2021-12-09
3      30.0    300.0   3000.0  2021-12-16
4      40.0    400.0   4000.0  2021-12-23
..      ...      ...      ...         ...
95    950.0   9500.0  95000.0  2023-09-21
96    960.0   9600.0  96000.0  2023-09-28
97    970.0   9700.0  97000.0  2023-10-05
98    980.0   9800.0  98000.0  2023-10-12
99    990.0   9900.0  99000.0  2023-10-19

100 rows × 4 columns

+
+
+
+
fcst_datetime : 2023-08-10 00:00:00
+start_datetime: 2023-01-19 00:00:00
+end_datetime  : 2023-10-19 00:00:00
+DatetimeIndex(['2023-01-19', '2023-01-26', '2023-02-02', '2023-02-09',
+               '2023-02-16', '2023-02-23', '2023-03-02', '2023-03-09',
+               '2023-03-16', '2023-03-23', '2023-03-30', '2023-04-06',
+               '2023-04-13', '2023-04-20', '2023-04-27', '2023-05-04',
+               '2023-05-11', '2023-05-18', '2023-05-25', '2023-06-01',
+               '2023-06-08', '2023-06-15', '2023-06-22', '2023-06-29',
+               '2023-07-06', '2023-07-13', '2023-07-20', '2023-07-27',
+               '2023-08-03', '2023-08-10', '2023-08-17', '2023-08-24',
+               '2023-08-31', '2023-09-07', '2023-09-14', '2023-09-21',
+               '2023-09-28', '2023-10-05', '2023-10-12', '2023-10-19'],
+              dtype='datetime64[ns]', freq='7D')
+
+
+
+

source

+
+
+

filter_df_by_datetime

+
+
 filter_df_by_datetime (df, start_datetime=None, end_datetime=None,
+                        datetime_col=None, use_index=False)
+
df: dataframe containing forecasting data
start_datetime (NoneType, default None): lower datetime bound
end_datetime (NoneType, default None): upper datetime bound
datetime_col (NoneType, default None): str data column containing the datetime
use_index (bool, default False): bool flag to indicate if index should be used to get column
+
+
# Test
+df_len = 100
+n_values = 3
+datetime_col = 'datetime'
+df = pd.DataFrame()
+for i in range(n_values):
+    df[f"value_{i}"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)
+freq = "7D"
+df['datetime'] = pd.date_range(None, pd.Timestamp(get_today()).floor(freq), periods=df_len, freq=freq)
+display(df)
+max_dt = pd.Timestamp(df['datetime'].max()).floor(freq)
+fcst_history = 30
+fcst_horizon = 10
+fcst_datetime = pd.date_range(end=fcst_datetime, periods=fcst_horizon + 1, freq=freq).floor(freq)[-1]
+start_datetime, end_datetime = get_fcst_bounds(df, fcst_datetime, datetime_col=datetime_col, fcst_history=fcst_history, fcst_horizon=fcst_horizon, freq=freq)
+test_eq(len(filter_df_by_datetime(df, start_datetime=start_datetime, end_datetime=end_datetime, datetime_col=datetime_col)), fcst_history + fcst_horizon)
+
+
    value_0  value_1  value_2    datetime
0       0.0      0.0      0.0  2021-11-25
1      10.0    100.0   1000.0  2021-12-02
2      20.0    200.0   2000.0  2021-12-09
3      30.0    300.0   3000.0  2021-12-16
4      40.0    400.0   4000.0  2021-12-23
..      ...      ...      ...         ...
95    950.0   9500.0  95000.0  2023-09-21
96    960.0   9600.0  96000.0  2023-09-28
97    970.0   9700.0  97000.0  2023-10-05
98    980.0   9800.0  98000.0  2023-10-12
99    990.0   9900.0  99000.0  2023-10-19

100 rows × 4 columns

+
+
+
+
+

source

+
+
+

get_fcst_data_from_df

+
+
 get_fcst_data_from_df (df, fcst_datetime, fcst_history=None,
+                        fcst_horizon=None, freq='D',
+                        datetime_format='%Y-%m-%d', datetime_col=None,
+                        use_index=False)
+
+

Get forecasting data from a dataframe

df: dataframe containing forecasting data
fcst_datetime: datetime for which a fcst is created. Optionally a tuple of datetimes if the fcst is created for a range of dates.
fcst_history (NoneType, default None): # steps used as input
fcst_horizon (NoneType, default None): # predicted steps
freq (str, default 'D'): datetime units. May contain letters only or a combination of ints + letters: e.g. "7D"
datetime_format (str, default '%Y-%m-%d'): format used to convert "today"
datetime_col (NoneType, default None): str data column containing the datetime
use_index (bool, default False): bool flag to indicate if index should be used to get column
+
+
# Test
+df_len = 100
+n_values = 3
+datetime_col = 'datetime'
+df = pd.DataFrame()
+for i in range(n_values):
+    df[f"value_{i}"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)
+freq = "7D"
+df['datetime'] = pd.date_range(None, pd.Timestamp(get_today()).floor(freq), periods=df_len, freq=freq)
+display(df)
+max_dt = pd.Timestamp(df['datetime'].max()).floor(freq)
+fcst_history = 30
+fcst_horizon = 10
+fcst_datetime = pd.date_range(end=fcst_datetime, periods=fcst_horizon + 1, freq=freq).floor(freq)[-1]
+test_eq(len(get_fcst_data_from_df(df, fcst_datetime, fcst_history=fcst_history, fcst_horizon=fcst_horizon, freq=freq, datetime_col=datetime_col)), 
+                                  fcst_history + fcst_horizon)
+
+
    value_0  value_1  value_2    datetime
0       0.0      0.0      0.0  2021-11-25
1      10.0    100.0   1000.0  2021-12-02
2      20.0    200.0   2000.0  2021-12-09
3      30.0    300.0   3000.0  2021-12-16
4      40.0    400.0   4000.0  2021-12-23
..      ...      ...      ...         ...
95    950.0   9500.0  95000.0  2023-09-21
96    960.0   9600.0  96000.0  2023-09-28
97    970.0   9700.0  97000.0  2023-10-05
98    980.0   9800.0  98000.0  2023-10-12
99    990.0   9900.0  99000.0  2023-10-19

100 rows × 4 columns

+
+
+
+ + +
+
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/data.preprocessing.html b/data.preprocessing.html new file mode 100644 index 000000000..f04c7e70a --- /dev/null +++ b/data.preprocessing.html @@ -0,0 +1,6660 @@ + + + + + + + + + +tsai - Data preprocessing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Data preprocessing

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Functions used to preprocess time series (both X and y).

+
+
+
from tsai.data.external import get_UCR_data
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, return_split=False)
+tfms = [None, Categorize()]
+dsets = TSDatasets(X, y, tfms=tfms, splits=splits)
+
+
+

source

+
+

ToNumpyCategory

+
+
 ToNumpyCategory (**kwargs)
+
+

Categorize a numpy batch

+
+
t = ToNumpyCategory()
+y_cat = t(y)
+y_cat[:10]
+
+
array([3, 2, 2, 3, 2, 4, 0, 5, 2, 1])
+
+
+
+
test_eq(t.decode(tensor(y_cat)), y)
+test_eq(t.decode(np.array(y_cat)), y)
+
+
+

source

+
+
+

OneHot

+
+
 OneHot (n_classes=None, **kwargs)
+
+

One-hot encode/ decode a batch

+
+
oh_encoder = OneHot()
+y_cat = ToNumpyCategory()(y)
+oht = oh_encoder(y_cat)
+oht[:10]
+
+
array([[0., 0., 0., 1., 0., 0.],
+       [0., 0., 1., 0., 0., 0.],
+       [0., 0., 1., 0., 0., 0.],
+       [0., 0., 0., 1., 0., 0.],
+       [0., 0., 1., 0., 0., 0.],
+       [0., 0., 0., 0., 1., 0.],
+       [1., 0., 0., 0., 0., 0.],
+       [0., 0., 0., 0., 0., 1.],
+       [0., 0., 1., 0., 0., 0.],
+       [0., 1., 0., 0., 0., 0.]])
+
+
+
+
n_classes = 10
+n_samples = 100
+
+t = torch.randint(0, n_classes, (n_samples,))
+oh_encoder = OneHot()
+oht = oh_encoder(t)
+test_eq(oht.shape, (n_samples, n_classes))
+test_eq(torch.argmax(oht, dim=-1), t)
+test_eq(oh_encoder.decode(oht), t)
+
+
+
n_classes = 10
+n_samples = 100
+
+a = np.random.randint(0, n_classes, (n_samples,))
+oh_encoder = OneHot()
+oha = oh_encoder(a)
+test_eq(oha.shape, (n_samples, n_classes))
+test_eq(np.argmax(oha, axis=-1), a)
+test_eq(oh_encoder.decode(oha), a)
+
+
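A hedged, minimal sketch (not in the original notebook): the n_classes argument shown in the signature can be passed to fix the width of the encoding when not every class is present in the data (assumption: n_classes sets the number of one-hot columns).

a = np.array([0, 3, 5])
oh_encoder = OneHot(n_classes=6)  # assumption: fixes the one-hot width at 6 columns
oha = oh_encoder(a)
test_eq(oha.shape, (3, 6))
test_eq(oh_encoder.decode(oha), a)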
+

source

+
+
+

TSNan2Value

+
+
 TSNan2Value (value=0, median=False, by_sample_and_var=True,
+              sel_vars=None)
+
+

Replaces any nan values by a predefined value or median

+
+
o = TSTensor(torch.randn(16, 10, 100))
+o[0,0] = float('nan')
+o[o > .9] = float('nan')
+o[[0,1,5,8,14,15], :, -20:] = float('nan')
+nan_vals1 = torch.isnan(o).sum()
+o2 = Pipeline(TSNan2Value(), split_idx=0)(o.clone())
+o3 = Pipeline(TSNan2Value(median=True, by_sample_and_var=True), split_idx=0)(o.clone())
+o4 = Pipeline(TSNan2Value(median=True, by_sample_and_var=False), split_idx=0)(o.clone())
+nan_vals2 = torch.isnan(o2).sum()
+nan_vals3 = torch.isnan(o3).sum()
+nan_vals4 = torch.isnan(o4).sum()
+test_ne(nan_vals1, 0)
+test_eq(nan_vals2, 0)
+test_eq(nan_vals3, 0)
+test_eq(nan_vals4, 0)
+
+
+
o = TSTensor(torch.randn(16, 10, 100))
+o[o > .9] = float('nan')
+o = TSNan2Value(median=True, sel_vars=[0,1,2,3,4])(o)
+test_eq(torch.isnan(o[:, [0,1,2,3,4]]).sum().item(), 0)
+
+
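A hedged, minimal sketch (not in the original notebook): nan values can also be replaced by an arbitrary sentinel through the documented value argument.

o = TSTensor(torch.randn(16, 10, 100))
o[o > .9] = float('nan')
o = TSNan2Value(value=-10)(o)  # replace every nan with -10
test_eq(torch.isnan(o).sum().item(), 0)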
+

source

+
+
+

TSStandardize

+
+
 TSStandardize (mean=None, std=None, by_sample=False, by_var=False,
+                by_step=False, exc_vars=None, eps=1e-08,
+                use_single_batch=True, verbose=False, **kwargs)
+
+

Standardizes batch of type TSTensor

+

Args:
- mean: you can pass a precalculated mean value as a torch tensor which is the one that will be used, or leave as None, in which case it will be estimated using a batch.
- std: you can pass a precalculated std value as a torch tensor which is the one that will be used, or leave as None, in which case it will be estimated using a batch. If both mean and std values are passed when instantiating TSStandardize, the rest of the arguments won't be used.
- by_sample: if True, it will calculate mean and std for each individual sample. Otherwise based on the entire batch.
- by_var:
  * False: mean and std will be the same for all variables.
  * True: a mean and std will be different for each variable.
  * a list of ints (like [0,1,3]): a different mean and std will be set for each variable on the list. Variables not included in the list won't be standardized.
  * a list that contains a list/lists (like [0, [1,3]]): a different mean and std will be set for each element of the list. If multiple elements are included in a list, the same mean and std will be set for those variables in the sublist/s (in the example a mean and std is determined for variable 0, and another one for variables 1 & 3 - the same one). Variables not included in the list won't be standardized.
- by_step: if False, it will standardize values for each time step.
- exc_vars: list of variables that won't be standardized.
- eps: it avoids dividing by 0.
- use_single_batch: if True a single training batch will be used to calculate mean & std. Else the entire training set will be used.

+
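As a hedged, minimal sketch (not in the original notebook), the list-of-lists form of by_var described above can be passed directly: here variables 1 and 3 share a single mean/std, variable 0 gets its own, and the remaining variables are left unstandardized.

# assumption: dsets are the NATOPS datasets created earlier on this page
batch_tfms = [TSStandardize(by_var=[0, [1, 3]], use_single_batch=False)]
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, batch_tfms=batch_tfms)
xb, yb = next(iter(dls.train))
xb.shape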
+
batch_tfms=[TSStandardize(by_sample=True, by_var=False, verbose=True)]
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, batch_tfms=batch_tfms)
+xb, yb = next(iter(dls.train))
+test_close(xb.mean(), 0, eps=1e-1)
+test_close(xb.std(), 1, eps=1e-1)
+
+
+
exc_vars = [0, 2, 6, 8, 12]
+batch_tfms=[TSStandardize(by_var=True, exc_vars=exc_vars)]
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, batch_tfms=batch_tfms)
+xb, yb = next(iter(dls.train))
+test_eq(len(dls.train.after_batch.fs[0].mean.flatten()), 24)
+test_eq(len(dls.train.after_batch.fs[0].std.flatten()), 24)
+test_eq(dls.train.after_batch.fs[0].mean.flatten()[exc_vars].cpu(), torch.zeros(len(exc_vars)))
+test_eq(dls.train.after_batch.fs[0].std.flatten()[exc_vars].cpu(), torch.ones(len(exc_vars)))
+print(dls.train.after_batch.fs[0].mean.flatten().data)
+print(dls.train.after_batch.fs[0].std.flatten().data)
+
+
tensor([ 0.0000, -1.3398,  0.0000,  0.9952, -0.8438, -0.4308,  0.0000, -0.6077,
+         0.0000,  0.7781, -0.4869, -0.0969,  0.0000, -1.0620, -0.6171,  0.9253,
+        -0.7023, -0.3077, -0.5600, -1.1922, -0.7503,  0.9491, -0.7744, -0.4356])
+tensor([1.0000, 0.8743, 1.0000, 0.7510, 1.1557, 0.5370, 1.0000, 0.2666, 1.0000,
+        0.2380, 0.4047, 0.3274, 1.0000, 0.6371, 0.2798, 0.5287, 0.8642, 0.4297,
+        0.5842, 0.7581, 0.3162, 0.6739, 1.0118, 0.4958])
+
+
+
+
from tsai.data.validation import TimeSplitter
+
+
+
X_nan = np.random.rand(100, 5, 10)
+idxs = random_choice(len(X_nan), int(len(X_nan)*.5), False)
+X_nan[idxs, 0] = float('nan')
+idxs = random_choice(len(X_nan), int(len(X_nan)*.5), False)
+X_nan[idxs, 1, -10:] = float('nan')
+batch_tfms = TSStandardize(by_var=True)
+dls = get_ts_dls(X_nan, batch_tfms=batch_tfms, splits=TimeSplitter(show_plot=False)(range_of(X_nan)))
+test_eq(torch.isnan(dls.after_batch[0].mean).sum(), 0)
+test_eq(torch.isnan(dls.after_batch[0].std).sum(), 0)
+xb = first(dls.train)[0]
+test_ne(torch.isnan(xb).sum(), 0)
+test_ne(torch.isnan(xb).sum(), torch.isnan(xb).numel())
+batch_tfms = [TSStandardize(by_var=True), Nan2Value()]
+dls = get_ts_dls(X_nan, batch_tfms=batch_tfms, splits=TimeSplitter(show_plot=False)(range_of(X_nan)))
+xb = first(dls.train)[0]
+test_eq(torch.isnan(xb).sum(), 0)
+
+
+
batch_tfms=[TSStandardize(by_sample=True, by_var=False, verbose=False)]
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
+xb, yb = next(iter(dls.train))
+test_close(xb.mean(), 0, eps=1e-1)
+test_close(xb.std(), 1, eps=1e-1)
+xb, yb = next(iter(dls.valid))
+test_close(xb.mean(), 0, eps=1e-1)
+test_close(xb.std(), 1, eps=1e-1)
+
+
+
tfms = [None, TSClassification()]
+batch_tfms = TSStandardize(by_sample=True)
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=[64, 128], inplace=True)
+xb, yb = dls.train.one_batch()
+test_close(xb.mean(), 0, eps=1e-1)
+test_close(xb.std(), 1, eps=1e-1)
+xb, yb = dls.valid.one_batch()
+test_close(xb.mean(), 0, eps=1e-1)
+test_close(xb.std(), 1, eps=1e-1)
+
+
+
tfms = [None, TSClassification()]
+batch_tfms = TSStandardize(by_sample=True, by_var=False, verbose=False)
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=[64, 128], inplace=False)
+xb, yb = dls.train.one_batch()
+test_close(xb.mean(), 0, eps=1e-1)
+test_close(xb.std(), 1, eps=1e-1)
+xb, yb = dls.valid.one_batch()
+test_close(xb.mean(), 0, eps=1e-1)
+test_close(xb.std(), 1, eps=1e-1)
+
+
+

source

+
+
+

TSNormalize

+
+
 TSNormalize (min=None, max=None, range=(-1, 1), by_sample=False,
+              by_var=False, by_step=False, clip_values=True,
+              use_single_batch=True, verbose=False, **kwargs)
+
+

Normalizes batch of type TSTensor

+
+
+
+

+
+
batch_tfms = [TSNormalize()]
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
+xb, yb = next(iter(dls.train))
+assert xb.max() <= 1
+assert xb.min() >= -1
+
+
+
batch_tfms=[TSNormalize(by_sample=True, by_var=False, verbose=False)]
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
+xb, yb = next(iter(dls.train))
+assert xb.max() <= 1
+assert xb.min() >= -1
+
+
+
batch_tfms = [TSNormalize(by_var=[0, [1, 2]], use_single_batch=False, clip_values=False, verbose=False)]
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
+xb, yb = next(iter(dls.train))
+assert xb[:, [0, 1, 2]].max() <= 1
+assert xb[:, [0, 1, 2]].min() >= -1
+
+
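A hedged, minimal sketch (not in the original notebook): the documented range argument lets you normalize to a different interval, e.g. (0, 1).

batch_tfms = [TSNormalize(range=(0, 1))]
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
xb, yb = next(iter(dls.train))
assert xb.max() <= 1
assert xb.min() >= 0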
+

source

+
+
+

TSStandardizeTuple

+
+
 TSStandardizeTuple (x_mean, x_std, y_mean=None, y_std=None, eps=1e-05)
+
+

Standardizes X (and y if provided)

+
+
a, b = TSTensor([1., 2, 3]), TSTensor([4., 5, 6])
+mean, std = a.mean(), b.std()
+tuple_batch_tfm = TSStandardizeTuple(mean, std)
+a_tfmd, b_tfmd = tuple_batch_tfm((a, b))
+test_ne(a, a_tfmd)
+test_ne(b, b_tfmd)
+
+
+

source

+
+
+

TSCatEncode

+
+
 TSCatEncode (a, sel_var)
+
+

Encodes a variable based on a categorical array

+
+
# static input
+a = np.random.randint(10, 20, 512)[:, None, None].repeat(10, 1).repeat(28, 2)
+b = TSTensor(torch.randint(0, 30, (512,), device='cpu').unsqueeze(-1).unsqueeze(-1).repeat(1, 10, 28))
+output = TSCatEncode(a, sel_var=0)(b)
+test_eq(0 <= output[:, 0].min() <= len(np.unique(a)), True)
+test_eq(0 <= output[:, 0].max() <= len(np.unique(a)), True)
+test_eq(output[:, 0], output[:, 0, 0][:, None].repeat(1, 28))
+output[:, 0].data
+
+
tensor([[0, 0, 0,  ..., 0, 0, 0],
+        [0, 0, 0,  ..., 0, 0, 0],
+        [0, 0, 0,  ..., 0, 0, 0],
+        ...,
+        [4, 4, 4,  ..., 4, 4, 4],
+        [4, 4, 4,  ..., 4, 4, 4],
+        [0, 0, 0,  ..., 0, 0, 0]])
+
+
+
+
# non-static input
+a = np.random.randint(10, 20, 512)[:, None, None].repeat(10, 1).repeat(28, 2)
+b = TSTensor(torch.randint(0, 30, (512, 10, 28), device='cpu'))
+output = TSCatEncode(a, sel_var=0)(b)
+test_eq(0 <= output[:, 0].min() <= len(np.unique(a)), True)
+test_eq(0 <= output[:, 0].max() <= len(np.unique(a)), True)
+test_ne(output[:, 0], output[:, 0, 0][:, None].repeat(1, 28))
+output[:, 0].data
+
+
tensor([[10,  0,  0,  ...,  4,  0,  0],
+        [10,  0,  0,  ...,  0,  0,  0],
+        [ 0,  2,  0,  ...,  0, 10,  6],
+        ...,
+        [ 1,  0,  9,  ...,  0,  0,  0],
+        [ 0,  0,  5,  ...,  0,  0,  0],
+        [ 0,  0,  0,  ...,  0,  0,  5]])
+
+
+
+

source

+
+
+

TSDropFeatByKey

+
+
 TSDropFeatByKey (key_var, p, sel_vars, sel_steps=None, **kwargs)
+
+

Randomly drops selected features at selected steps with a given probability per feature, step, and key variable

key_var: int representing the variable that contains the key information
p: array of shape (n_keys, n_features, n_steps) representing the probabilities of dropping a feature at a given step for a given key
sel_vars: int or slice or list of ints or array of ints representing the variables to drop
sel_steps (NoneType, default None): int or slice or list of ints or array of ints representing the steps to drop
kwargs
+
+
n_devices = 4
+key_var = 0
+
+for sel_vars in [1, [1], [1,3,5], slice(3, 5)]:
+    for sel_steps in [None, -1, 27, [27], [25, 26], slice(10, 20)]:
+        o = TSTensor(torch.rand(512, 10, 28))
+        o[:, key_var] = torch.randint(0, n_devices, (512, 28))
+        n_vars = 1 if isinstance(sel_vars, Integral) else len(sel_vars) if isinstance(sel_vars, list) else sel_vars.stop - sel_vars.start
+        n_steps = o.shape[-1] if sel_steps is None else 1 if isinstance(sel_steps, Integral) else \
+            len(sel_steps) if isinstance(sel_steps, list) else sel_steps.stop - sel_steps.start
+        p = torch.rand(n_devices, n_vars, n_steps) * .5 + .5
+        output = TSDropFeatByKey(key_var, p, sel_vars, sel_steps)(o)
+        assert torch.isnan(output).sum((0, 2))[sel_vars].sum() > 0
+        assert torch.isnan(output).sum((0, 2))[~np.array(np.arange(o.shape[1])[sel_vars])].sum() == 0
+
+
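A hedged, minimal single-call sketch (not in the original notebook), complementing the looped test above: variable 0 holds one of 4 keys and variable 1 is dropped at every step with probability 0.5, regardless of the key.

o = TSTensor(torch.rand(512, 10, 28))
o[:, 0] = torch.randint(0, 4, (512, 28))   # key variable with 4 possible keys
p = torch.full((4, 1, 28), .5)             # (n_keys, n_features, n_steps)
output = TSDropFeatByKey(key_var=0, p=p, sel_vars=[1])(o)
assert torch.isnan(output[:, 1]).any()       # some values of variable 1 were dropped
assert not torch.isnan(output[:, 2:]).any()  # other variables are untouched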
+

source

+
+
+

TSClipOutliers

+
+
 TSClipOutliers (min=None, max=None, by_sample=False, by_var=False,
+                 use_single_batch=False, verbose=False, **kwargs)
+
+

Clip outliers batch of type TSTensor based on the IQR

+
+
batch_tfms=[TSClipOutliers(-1, 1, verbose=True)]
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
+xb, yb = next(iter(dls.train))
+assert xb.max() <= 1
+assert xb.min() >= -1
+test_close(xb.min(), -1, eps=1e-1)
+test_close(xb.max(), 1, eps=1e-1)
+xb, yb = next(iter(dls.valid))
+test_close(xb.min(), -1, eps=1e-1)
+test_close(xb.max(), 1, eps=1e-1)
+
+
TSClipOutliers min=-1, max=1
+
+
+
+
+

source

+
+
+

TSClip

+
+
 TSClip (min=-6, max=6, **kwargs)
+
+

Clip batch of type TSTensor

+
+
t = TSTensor(torch.randn(10, 20, 100)*10)
+test_le(TSClip()(t).max().item(), 6)
+test_ge(TSClip()(t).min().item(), -6)
+
+
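A hedged, minimal sketch (not in the original notebook): custom clipping bounds can be passed through the documented min and max arguments.

t = TSTensor(torch.randn(10, 20, 100) * 10)
test_le(TSClip(min=-1, max=1)(t).max().item(), 1)
test_ge(TSClip(min=-1, max=1)(t).min().item(), -1)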
+

source

+
+
+

TSSelfMissingness

+
+
 TSSelfMissingness (sel_vars=None, **kwargs)
+
+

Applies missingness from samples in a batch to random samples in the batch for selected variables

+
+
t = TSTensor(torch.randn(10, 20, 100))
+t[t>.8] = np.nan
+t2 = TSSelfMissingness()(t.clone())
+t3 = TSSelfMissingness(sel_vars=[0,3,5,7])(t.clone())
+assert (torch.isnan(t).sum() < torch.isnan(t2).sum()) and (torch.isnan(t2).sum() >  torch.isnan(t3).sum())
+
+
+

source

+
+
+

TSRobustScale

+
+
 TSRobustScale (median=None, iqr=None, quantile_range=(25.0, 75.0),
+                use_single_batch=True, exc_vars=None, eps=1e-08,
+                verbose=False, **kwargs)
+
+

This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range)

+
+
batch_tfms = TSRobustScale(verbose=True, use_single_batch=False)
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, batch_tfms=batch_tfms, num_workers=0)
+xb, yb = next(iter(dls.train))
+xb.min()
+
+
TSRobustScale median=torch.Size([1, 24, 1]) iqr=torch.Size([1, 24, 1])
+
+
+
TSTensor([-2.3502116203308105], device=cpu, dtype=torch.float32)
+
+
+
+
exc_vars = [0, 2, 6, 8, 12]
+batch_tfms = TSRobustScale(use_single_batch=False, exc_vars=exc_vars)
+dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, batch_tfms=batch_tfms, num_workers=0)
+xb, yb = next(iter(dls.train))
+test_eq(len(dls.train.after_batch.fs[0].median.flatten()), 24)
+test_eq(len(dls.train.after_batch.fs[0].iqr.flatten()), 24)
+test_eq(dls.train.after_batch.fs[0].median.flatten()[exc_vars].cpu(), torch.zeros(len(exc_vars)))
+test_eq(dls.train.after_batch.fs[0].iqr.flatten()[exc_vars].cpu(), torch.ones(len(exc_vars)))
+print(dls.train.after_batch.fs[0].median.flatten().data)
+print(dls.train.after_batch.fs[0].iqr.flatten().data)
+
+
tensor([ 0.0000, -1.7305,  0.0000,  0.7365, -1.2736, -0.5528,  0.0000, -0.7074,
+         0.0000,  0.7087, -0.7014, -0.1120,  0.0000, -1.3332, -0.5958,  0.7563,
+        -1.0129, -0.3985, -0.5186, -1.5125, -0.7353,  0.7326, -1.1495, -0.5359])
+tensor([1.0000, 4.2788, 1.0000, 4.8008, 8.0682, 2.2777, 1.0000, 0.6955, 1.0000,
+        1.4875, 2.6386, 1.4756, 1.0000, 2.9811, 1.2507, 3.2291, 5.9906, 1.9098,
+        1.3428, 3.6368, 1.3689, 4.4213, 6.9907, 2.1939])
+
+
+
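A hedged, minimal sketch (not in the original notebook): a wider quantile range than the default IQR can be passed via the documented quantile_range argument.

batch_tfms = TSRobustScale(quantile_range=(10.0, 90.0), use_single_batch=False)
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, batch_tfms=batch_tfms, num_workers=0)
xb, yb = next(iter(dls.train))
xb.min(), xb.max()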
+

source

+
+
+

TSGaussianStandardize

+
+
 TSGaussianStandardize (E_mean:np.ndarray, S_mean:np.ndarray,
+                        E_std:np.ndarray, S_std:np.ndarray, eps=1e-08,
+                        split_idx=0, **kwargs)
+
+

Scales each batch using modeled mean and std based on UNCERTAINTY MODELING FOR OUT-OF-DISTRIBUTION GENERALIZATION https://arxiv.org/abs/2202.03958

E_mean (np.ndarray): Mean expected value
S_mean (np.ndarray): Uncertainty (standard deviation) of the mean
E_std (np.ndarray): Standard deviation expected value
S_std (np.ndarray): Uncertainty (standard deviation) of the standard deviation
eps (float, default 1e-08): (epsilon) small amount added to the standard deviation to avoid dividing by zero
split_idx (int, default 0): Flag to indicate to which set this transform is applied. 0: training, 1: validation, None: both
kwargs
+
+

source

+
+
+

get_random_stats

+
+
 get_random_stats (E_mean, S_mean, E_std, S_std)
+
+
+

source

+
+
+

get_stats_with_uncertainty

+
+
 get_stats_with_uncertainty (o, sel_vars=None,
+                             sel_vars_zero_mean_unit_var=False, bs=64,
+                             n_trials=None, axis=(0, 2))
+
+
+
arr = np.random.rand(1000, 2, 50)
+E_mean, S_mean, E_std, S_std = get_stats_with_uncertainty(arr, sel_vars=None, bs=64, n_trials=None, axis=(0,2))
+new_mean, new_std = get_random_stats(E_mean, S_mean, E_std, S_std)
+new_mean2, new_std2 = get_random_stats(E_mean, S_mean, E_std, S_std)
+test_ne(new_mean, new_mean2)
+test_ne(new_std, new_std2)
+test_eq(new_mean.shape, (1, 2, 1))
+test_eq(new_std.shape, (1, 2, 1))
+new_mean, new_std
+
+ + +
+
+ +
+ +
+
+
(array([[[0.49649504],
+         [0.49636062]]]),
+ array([[[0.28626438],
+         [0.28665599]]]))
+
+
+

TSGaussianStandardize can be used jointly with TSStandardize in the following way:

+
X, y, splits = get_UCR_data('LSST', split_data=False)
+tfms = [None, TSClassification()]
+E_mean, S_mean, E_std, S_std = get_stats_with_uncertainty(X, sel_vars=None, bs=64, n_trials=None, axis=(0,2))
+batch_tfms = [TSGaussianStandardize(E_mean, S_mean, E_std, S_std, split_idx=0), TSStandardize(E_mean, S_mean, split_idx=1)]
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=[32, 64])
+learn = ts_learner(dls, InceptionTimePlus, metrics=accuracy, cbs=[ShowGraph()])
+learn.fit_one_cycle(1, 1e-2)
+

In this way the train batches are scaled based on mean and standard deviation distributions, while the valid batches are scaled with fixed mean and standard deviation values.

+

The intent is to improve out-of-distribution performance. This method is inspired by UNCERTAINTY MODELING FOR OUT-OF-DISTRIBUTION GENERALIZATION https://arxiv.org/abs/2202.03958.

+
+

source

+
+
+

TSDiff

+
+
 TSDiff (lag=1, pad=True, **kwargs)
+
+

Differences batch of type TSTensor

+
+
t = TSTensor(torch.arange(24).reshape(2,3,4))
+test_eq(TSDiff()(t)[..., 1:].float().mean(), 1)
+test_eq(TSDiff(lag=2, pad=False)(t).float().mean(), 2)
+
+
+

source

+
+
+

TSLog

+
+
 TSLog (ex=None, **kwargs)
+
+

Log transforms batch of type TSTensor + 1. Accepts positive and negative numbers

+
+
t = TSTensor(torch.rand(2,3,4)) * 2 - 1 
+tfm = TSLog()
+enc_t = tfm(t)
+test_ne(enc_t, t)
+test_close(tfm.decodes(enc_t).data, t.data)
+
+
+

source

+
+
+

TSCyclicalPosition

+
+
 TSCyclicalPosition (cyclical_var=None, magnitude=None, drop_var=False,
+                     **kwargs)
+
+

Concatenates the position along the sequence as 2 additional variables (sine and cosine)

cyclical_var (NoneType, default None): Optional variable to indicate the steps within the cycle (i.e. minute of the day)
magnitude (NoneType, default None): Added for compatibility. It's not used.
drop_var (bool, default False): Flag to indicate if the cyclical var is removed
kwargs
+
+
bs, c_in, seq_len = 1,3,100
+t = TSTensor(torch.rand(bs, c_in, seq_len))
+enc_t = TSCyclicalPosition()(t)
+test_ne(enc_t, t)
+assert t.shape[1] == enc_t.shape[1] - 2
+plt.plot(enc_t[0, -2:].cpu().numpy().T)
+plt.show()
+
+
+
+

+
+
+
+
+
+
bs, c_in, seq_len = 1,3,100
+t1 = torch.rand(bs, c_in, seq_len)
+t2 = torch.arange(seq_len)
+t2 = torch.cat([t2[35:], t2[:35]]).reshape(1, 1, -1)
+t = TSTensor(torch.cat([t1, t2], 1))
+mask = torch.rand_like(t) > .8
+t[mask] = np.nan
+enc_t = TSCyclicalPosition(3)(t)
+test_ne(enc_t, t)
+assert t.shape[1] == enc_t.shape[1] - 2
+plt.plot(enc_t[0, -2:].cpu().numpy().T)
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

TSLinearPosition

+
+
 TSLinearPosition (linear_var:int=None, var_range:tuple=None,
+                   magnitude=None, drop_var:bool=False,
+                   lin_range:tuple=(-1, 1), **kwargs)
+
+

Concatenates the position along the sequence as 1 additional variable

linear_var (int, default None): Optional variable to indicate the steps within the cycle (i.e. minute of the day)
var_range (tuple, default None): Optional range indicating min and max values of the linear variable
magnitude (NoneType, default None): Added for compatibility. It's not used.
drop_var (bool, default False): Flag to indicate if the linear var is removed
lin_range (tuple, default (-1, 1))
kwargs
+
+
bs, c_in, seq_len = 1,3,100
+t = TSTensor(torch.rand(bs, c_in, seq_len))
+enc_t = TSLinearPosition()(t)
+test_ne(enc_t, t)
+assert t.shape[1] == enc_t.shape[1] - 1
+plt.plot(enc_t[0, -1].cpu().numpy().T)
+plt.show()
+
+
+
+

+
+
+
+
+
+
t = torch.arange(100)
+t1 = torch.cat([t[30:], t[:30]]).reshape(1, 1, -1)
+t2 = torch.cat([t[52:], t[:52]]).reshape(1, 1, -1)
+t = torch.cat([t1, t2]).float()
+mask = torch.rand_like(t) > .8
+t[mask] = np.nan
+t = TSTensor(t)
+enc_t = TSLinearPosition(linear_var=0, var_range=(0, 100), drop_var=True)(t)
+test_ne(enc_t, t)
+assert t.shape[1] == enc_t.shape[1]
+plt.plot(enc_t[0, -1].cpu().numpy().T)
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

TSMissingness

+
+
 TSMissingness (sel_vars=None, feature_idxs=None, magnitude=None,
+                **kwargs)
+
+

Concatenates data missingness for selected features along the sequence as additional variables

+
+
bs, c_in, seq_len = 1,3,100
+t = TSTensor(torch.rand(bs, c_in, seq_len))
+t[t>.5] = np.nan
+enc_t = TSMissingness(sel_vars=[0,2])(t)
+test_eq(enc_t.shape[1], 5)
+test_eq(enc_t[:, 3:], torch.isnan(t[:, [0,2]]).float())
+
+
+

source

+
+
+

TSPositionGaps

+
+
 TSPositionGaps (sel_vars=None, feature_idxs=None, magnitude=None,
+                 forward=True, backward=False, nearest=False,
+                 normalize=True, **kwargs)
+
+

Concatenates gaps for selected features along the sequence as additional variables

+
+
bs, c_in, seq_len = 1,3,8
+t = TSTensor(torch.rand(bs, c_in, seq_len))
+t[t>.5] = np.nan
+enc_t = TSPositionGaps(sel_vars=[0,2], forward=True, backward=True, nearest=True, normalize=False)(t)
+test_eq(enc_t.shape[1], 9)
+enc_t.data
+
+
tensor([[[0.2875, 0.0553,    nan,    nan, 0.1478, 0.1234, 0.0835, 0.1465],
+         [   nan,    nan, 0.3967,    nan, 0.0654,    nan, 0.2289, 0.1094],
+         [0.3820, 0.1613, 0.4825, 0.1379,    nan,    nan, 0.3000, 0.4673],
+         [1.0000, 1.0000, 1.0000, 2.0000, 3.0000, 1.0000, 1.0000, 1.0000],
+         [1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 2.0000, 3.0000, 1.0000],
+         [1.0000, 3.0000, 2.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
+         [1.0000, 1.0000, 1.0000, 3.0000, 2.0000, 1.0000, 1.0000, 1.0000],
+         [1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
+         [1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]])
+
+
+
+

source

+
+
+

TSRollingMean

+
+
 TSRollingMean (sel_vars=None, feature_idxs=None, magnitude=None,
+                window=2, replace=False, **kwargs)
+
+

Calculates the rolling mean for all/selected features along the sequence

+

It replaces the original values or adds additional variables (default). If nan values are found, they will be filled forward and backward.

+
+
bs, c_in, seq_len = 1,3,8
+t = TSTensor(torch.rand(bs, c_in, seq_len))
+t[t > .6] = np.nan
+print(t.data)
+enc_t = TSRollingMean(sel_vars=[0,2], window=3)(t)
+test_eq(enc_t.shape[1], 5)
+print(enc_t.data)
+enc_t = TSRollingMean(window=3, replace=True)(t)
+test_eq(enc_t.shape[1], 3)
+print(enc_t.data)
+
+
tensor([[[   nan, 0.3836,    nan,    nan, 0.0237, 0.4363,    nan, 0.1834],
+         [0.2749, 0.5018,    nan, 0.4008, 0.2797, 0.4010, 0.4323, 0.3692],
+         [0.4013,    nan, 0.1272, 0.2202, 0.4324, 0.3293, 0.5350, 0.3919]]])
+tensor([[[0.3836, 0.3836, 0.3836, 0.3836, 0.0237, 0.4363, 0.4363, 0.1834],
+         [0.2749, 0.5018,    nan, 0.4008, 0.2797, 0.4010, 0.4323, 0.3692],
+         [0.4013, 0.4013, 0.1272, 0.2202, 0.4324, 0.3293, 0.5350, 0.3919],
+         [0.3836, 0.3836, 0.3836, 0.3836, 0.2636, 0.2812, 0.2988, 0.3520],
+         [0.4013, 0.4013, 0.3099, 0.2496, 0.2599, 0.3273, 0.4322, 0.4187]]])
+tensor([[[0.3836, 0.3836, 0.3836, 0.3836, 0.2636, 0.2812, 0.2988, 0.3520],
+         [0.2749, 0.3883, 0.4261, 0.4681, 0.3941, 0.3605, 0.3710, 0.4008],
+         [0.4013, 0.4013, 0.3099, 0.2496, 0.2599, 0.3273, 0.4322, 0.4187]]])
+
+
+
+

source

+
+
+

TSLogReturn

+
+
 TSLogReturn (lag=1, pad=True, **kwargs)
+
+

Calculates log-return of batch of type TSTensor. For positive values only

+
+
t = TSTensor([1,2,4,8,16,32,64,128,256]).float()
+test_eq(TSLogReturn(pad=False)(t).std(), 0)
+
+
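A hedged, minimal sketch (not in the original notebook): with the default pad=True the output keeps the original sequence length, mirroring the padding behaviour shown for TSDiff above.

t = TSTensor([1,2,4,8,16,32,64,128,256]).float()
test_eq(TSLogReturn(pad=True)(t).shape, t.shape)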
+

source

+
+
+

TSAdd

+
+
 TSAdd (add, **kwargs)
+
+

Add a defined amount to each batch of type TSTensor.

+
+
t = TSTensor([1,2,3]).float()
+test_eq(TSAdd(1)(t), TSTensor([2,3,4]).float())
+
+
+

source

+
+
+

TSClipByVar

+
+
 TSClipByVar (var_min_max, **kwargs)
+
+

Clip batch of type TSTensor by variable

+

Args: var_min_max: list of tuples containing variable index, min value (or None) and max value (or None)

+
+
t = TSTensor(torch.rand(16, 3, 10) * tensor([1,10,100]).reshape(1,-1,1))
+max_values = t.max(0).values.max(-1).values.data
+max_values2 = TSClipByVar([(1,None,5), (2,10,50)])(t).max(0).values.max(-1).values.data
+test_le(max_values2[1], 5)
+test_ge(max_values2[2], 10)
+test_le(max_values2[2], 50)
+
+
+

source

+
+
+

TSDropVars

+
+
 TSDropVars (drop_vars, **kwargs)
+
+

Drops selected variable from the input

+
+
t = TSTensor(torch.arange(24).reshape(2, 3, 4))
+enc_t = TSDropVars(2)(t)
+test_ne(t, enc_t)
+enc_t.data
+
+
tensor([[[ 0,  1,  2,  3],
+         [ 4,  5,  6,  7]],
+
+        [[12, 13, 14, 15],
+         [16, 17, 18, 19]]])
+
+
+
+

source

+
+
+

TSOneHotEncode

+
+
 TSOneHotEncode (sel_var:int, unique_labels:list, add_na:bool=False,
+                 drop_var:bool=True, magnitude=None, **kwargs)
+
+

Delegates (__call__,decode,setup) to (encodes,decodes,setups) if split_idx matches

sel_var (int): Variable that is one-hot encoded
unique_labels (list): List containing all labels (excluding nan values)
add_na (bool, default False): Flag to indicate if values not included in vocab should be set as 0
drop_var (bool, default True): Flag to indicate if the selected var is removed
magnitude (NoneType, default None): Added for compatibility. It's not used.
kwargs
+
+
bs = 2
+seq_len = 5
+t_cont = torch.rand(bs, 1, seq_len)
+t_cat = torch.randint(0, 3, t_cont.shape)
+t = TSTensor(torch.cat([t_cat, t_cont], 1))
+t_cat
+
+
tensor([[[0, 2, 1, 0, 2]],
+
+        [[0, 0, 1, 1, 2]]])
+
+
+
+
tfm = TSOneHotEncode(0, [0, 1, 2])
+output = tfm(t)[:, -3:].data
+test_eq(t_cat, torch.argmax(tfm(t)[:, -3:], 1)[:, None])
+tfm(t)[:, -3:].data
+
+
tensor([[[1., 0., 0., 1., 0.],
+         [0., 0., 1., 0., 0.],
+         [0., 1., 0., 0., 1.]],
+
+        [[1., 1., 0., 0., 0.],
+         [0., 0., 1., 1., 0.],
+         [0., 0., 0., 0., 1.]]])
+
+
+
+
bs = 2
+seq_len = 5
+t_cont = torch.rand(bs, 1, seq_len)
+t_cat = torch.tensor([[10.,  5., 11., np.nan, 12.], [ 5., 12., 10., np.nan, 11.]])[:, None]
+t = TSTensor(torch.cat([t_cat, t_cont], 1))
+t_cat
+
+
tensor([[[10.,  5., 11., nan, 12.]],
+
+        [[ 5., 12., 10., nan, 11.]]])
+
+
+
+
tfm = TSOneHotEncode(0, [10, 11, 12], drop_var=False)
+mask = ~torch.isnan(t[:, 0])
+test_eq(tfm(t)[:, 0][mask], t[:, 0][mask])
+tfm(t)[:, -3:].data
+
+
tensor([[[1., 0., 0., 0., 0.],
+         [0., 0., 1., 0., 0.],
+         [0., 0., 0., 0., 1.]],
+
+        [[0., 0., 1., 0., 0.],
+         [0., 0., 0., 0., 1.],
+         [0., 1., 0., 0., 0.]]])
+
+
+
+
t1 = torch.randint(3, 7, (2, 1, 10))
+t2 = torch.rand(2, 1, 10)
+t = TSTensor(torch.cat([t1, t2], 1))
+output = TSOneHotEncode(0, [3, 4, 5], add_na=True, drop_var=True)(t)
+test_eq((t1 > 5).float(), output.data[:, [1]])
+test_eq((t1 == 3).float(), output.data[:, [2]])
+test_eq((t1 == 4).float(), output.data[:, [3]])
+test_eq((t1 == 5).float(), output.data[:, [4]])
+test_eq(output.shape, (t.shape[0], 5, t.shape[-1]))
+
+
+

source

+
+
+

TSPosition

+
+
 TSPosition (steps:list, magnitude=None, **kwargs)
+
+

Delegates (__call__,decode,setup) to (encodes,decodes,setups) if split_idx matches

steps (list): List containing the steps passed as an additional variable. They should be normalized.
magnitude (NoneType, default None): Added for compatibility. It's not used.
kwargs
+
+
t = TSTensor(torch.rand(2, 1, 10)).float()
+a = np.linspace(-1, 1, 10).astype('float64')
+TSPosition(a)(t).data.dtype, t.dtype
+
+
(torch.float32, torch.float32)
+
+
+
+

source

+
+
+

PatchEncoder

+
+
 PatchEncoder (patch_len:int, patch_stride:int=None,
+               pad_at_start:bool=True, value:float=0.0, seq_len:int=None,
+               merge_dims:bool=True, reduction:str='none',
+               reduction_dim:int=-1, swap_dims:tuple=None)
+
+

Creates a sequence of patches from a 3d input tensor.

patch_len (int): Number of time steps in each patch.
patch_stride (int, default None): Stride of the patch.
pad_at_start (bool, default True): If True, pad the input tensor at the start to ensure that the input tensor is evenly divisible by the patch length.
value (float, default 0.0): Value to pad the input tensor with.
seq_len (int, default None): Number of time steps in the input tensor. If None, make sure seq_len >= patch_len and a multiple of stride
merge_dims (bool, default True): If True, merge channels within the same patch.
reduction (str, default 'none'): type of reduction applied. Available: "none", "mean", "min", "max", "mode"
reduction_dim (int, default -1): dimension where the reduction is applied
swap_dims (tuple, default None): If True, swap the time and channel dimensions.
+
+
seq_len = 17
+patch_len = 10
+patch_stride = 5
+
+z11 = torch.arange(seq_len).reshape(1, 1, -1)
+z12 = torch.arange(seq_len).reshape(1, 1, -1) * 10
+z1 = torch.cat((z11, z12), dim=1)
+z21 = torch.arange(seq_len).reshape(1, 1, -1)
+z22 = torch.arange(seq_len).reshape(1, 1, -1) * 10
+z2 = torch.cat((z21, z22), dim=1) + 1
+z31 = torch.arange(seq_len).reshape(1, 1, -1)
+z32 = torch.arange(seq_len).reshape(1, 1, -1) * 10
+z3 = torch.cat((z31, z32), dim=1) + 2
+z = torch.cat((z11, z21, z31), dim=0)
+z = torch.cat((z1, z2, z3), dim=0)
+print(z.shape, "\n")
+print(z)
+
+patch_encoder = PatchEncoder(patch_len=patch_len, patch_stride=patch_stride, value=-1, seq_len=seq_len, merge_dims=True)
+output = patch_encoder(z)
+print(output.shape, "\n")
+first_token = output[..., 0]
+expected_first_token = torch.tensor([[-1, -1, -1,  0,  1,  2,  3,  4,  5,  6, -1, -1, -1,  0, 10, 20, 30, 40,
+         50, 60],
+        [-1, -1, -1,  1,  2,  3,  4,  5,  6,  7, -1, -1, -1,  1, 11, 21, 31, 41,
+         51, 61],
+        [-1, -1, -1,  2,  3,  4,  5,  6,  7,  8, -1, -1, -1,  2, 12, 22, 32, 42,
+         52, 62]])
+test_eq(first_token, expected_first_token)
+
+
torch.Size([3, 2, 17]) 
+
+tensor([[[  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,
+           14,  15,  16],
+         [  0,  10,  20,  30,  40,  50,  60,  70,  80,  90, 100, 110, 120, 130,
+          140, 150, 160]],
+
+        [[  1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,  14,
+           15,  16,  17],
+         [  1,  11,  21,  31,  41,  51,  61,  71,  81,  91, 101, 111, 121, 131,
+          141, 151, 161]],
+
+        [[  2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,  14,  15,
+           16,  17,  18],
+         [  2,  12,  22,  32,  42,  52,  62,  72,  82,  92, 102, 112, 122, 132,
+          142, 152, 162]]])
+torch.Size([3, 20, 3]) 
+
+
+
+
+

source

+
+
+

TSPatchEncoder

+
+
 TSPatchEncoder (patch_len:int, patch_stride:int=None,
+                 pad_at_start:bool=True, value:float=0.0,
+                 seq_len:int=None, merge_dims:bool=True,
+                 reduction:str='none', reduction_dim:int=-2,
+                 swap_dims:tuple=None)
+
+

Transforms a time series into a sequence of patches along the last dimension

patch_len (int): Number of time steps in each patch.
patch_stride (int, default None): Stride of the patch.
pad_at_start (bool, default True): If True, pad the input tensor at the start to ensure that the input tensor is evenly divisible by the patch length.
value (float, default 0.0): Value to pad the input tensor with.
seq_len (int, default None): Number of time steps in the input tensor. If None, make sure seq_len >= patch_len and a multiple of stride.
merge_dims (bool, default True): If True, merge channels within the same patch.
reduction (str, default 'none'): Type of reduction applied. Available: “none”, “mean”, “min”, “max”, “mode”.
reduction_dim (int, default -2): Dimension where the reduction is applied.
swap_dims (tuple, default None): If True, swap the time and channel dimensions.
+
+
bs = 2
+c_in = 1
+seq_len = 10
+patch_len = 4
+
+t = TSTensor(torch.arange(bs * c_in * seq_len).reshape(bs, c_in, seq_len))
+print(t.data)
+print(t.shape, "\n")
+
+patch_encoder = TSPatchEncoder(patch_len=patch_len, patch_stride=1, seq_len=seq_len)
+output = patch_encoder(t)
+test_eq(output.shape, ([bs, patch_len, 7]))
+print("first patch:\n", output[..., 0].data, "\n")
+
+patch_encoder = TSPatchEncoder(patch_len=patch_len, patch_stride=None, seq_len=seq_len)
+output = patch_encoder(t)
+test_eq(output.shape, ([bs, patch_len, 3]))
+print("first patch:\n", output[..., 0].data, "\n")
+
+
tensor([[[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9]],
+
+        [[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]]])
+torch.Size([2, 1, 10]) 
+
+first patch:
+ tensor([[ 0,  1,  2,  3],
+        [10, 11, 12, 13]]) 
+
+first patch:
+ tensor([[ 0,  0,  0,  1],
+        [ 0,  0, 10, 11]]) 
+
+
+
+
+

source

+
+
+

TSTuplePatchEncoder

+
+
 TSTuplePatchEncoder (patch_len:int, patch_stride:int=None,
+                      pad_at_start:bool=True, value:float=0.0,
+                      seq_len:int=None, merge_dims:bool=True,
+                      reduction:str='none', reduction_dim:int=-2,
+                      swap_dims:tuple=None)
+
+

Transforms a time series with x and y into sequences of patches along the last dimension.

patch_len (int): Number of time steps in each patch.
patch_stride (int, default None): Stride of the patch.
pad_at_start (bool, default True): If True, pad the input tensor at the start to ensure that the input tensor is evenly divisible by the patch length.
value (float, default 0.0): Value to pad the input tensor with.
seq_len (int, default None): Number of time steps in the input tensor. If None, make sure seq_len >= patch_len and a multiple of stride.
merge_dims (bool, default True): If True, merge y channels within the same patch.
reduction (str, default 'none'): Type of reduction applied to y. Available: “none”, “mean”, “min”, “max”, “mode”.
reduction_dim (int, default -2): Dimension where the y reduction is applied.
swap_dims (tuple, default None): If True, swap the time and channel dimensions in y.
+
+
# test
+bs = 2
+c_in = 2
+seq_len = 10
+patch_len = 4
+
+x = torch.arange(bs * c_in * seq_len).reshape(bs, c_in, seq_len)
+y = torch.arange(bs * c_in * seq_len).reshape(bs, c_in, seq_len) * 10
+print(x)
+print(y)
+
+
+patch_encoder = TSTuplePatchEncoder(patch_len=patch_len, patch_stride=1, seq_len=seq_len, merge_dims=True)
+x_out, y_out = patch_encoder((x, y))
+test_eq(x_out.shape, ([bs, c_in * patch_len, 7]))
+test_eq(y_out.shape, ([bs, c_in * patch_len, 7]))
+print("first x patch:\n", x_out[..., 0].data, "\n")
+print("first y patch:\n", y_out[..., 0].data, "\n")
+
+patch_encoder = TSTuplePatchEncoder(patch_len=patch_len, patch_stride=1, seq_len=seq_len, merge_dims=False, reduction="max")
+x_out, y_out = patch_encoder((x, y))
+test_eq(x_out.shape, ([bs, c_in * patch_len, 7]))
+test_eq(y_out.shape, ([bs, c_in, 7]))
+print("first x patch:\n", x_out[..., 0].data, "\n")
+print("first y patch:\n", y_out[..., 0].data, "\n")
+
+
tensor([[[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
+         [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]],
+
+        [[20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
+         [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]]])
+tensor([[[  0,  10,  20,  30,  40,  50,  60,  70,  80,  90],
+         [100, 110, 120, 130, 140, 150, 160, 170, 180, 190]],
+
+        [[200, 210, 220, 230, 240, 250, 260, 270, 280, 290],
+         [300, 310, 320, 330, 340, 350, 360, 370, 380, 390]]])
+first x patch:
+ tensor([[ 0,  1,  2,  3, 10, 11, 12, 13],
+        [20, 21, 22, 23, 30, 31, 32, 33]]) 
+
+first y patch:
+ tensor([[  0,  10,  20,  30, 100, 110, 120, 130],
+        [200, 210, 220, 230, 300, 310, 320, 330]]) 
+
+first x patch:
+ tensor([[ 0,  1,  2,  3, 10, 11, 12, 13],
+        [20, 21, 22, 23, 30, 31, 32, 33]]) 
+
+first y patch:
+ tensor([[ 30, 130],
+        [230, 330]]) 
+
+
+
+
+
+

sklearn API transforms

+
+

source

+
+

object2date

+
+
 object2date (x, format=None)
+
+
+
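object2date is presumably the helper used by TSShrinkDataFrame (below) to turn object (string) columns that look like dates into proper datetime values. A minimal sketch, assuming it accepts a pandas Series of date-like strings plus an optional format string; the expected dtype in the comment is an assumption, not a documented guarantee:

s = pd.Series(pd.date_range('1/1/2011', periods=5, freq='M').astype(str))  # object-dtype date strings
dates = object2date(s)  # hypothetical direct call; pass format=... if parsing needs it
print(dates.dtype)      # expected: datetime64[ns] if the strings are recognized as dates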

source

+
+
+

TSShrinkDataFrame

+
+
 TSShrinkDataFrame (columns=None, skip=None, obj2cat=True, int2uint=False,
+                    verbose=True)
+
+

A transformer to shrink dataframe or series memory usage

columns (NoneType, default None): List[str], optional. Columns to shrink, all columns by default.
skip (NoneType, default None): List[str], optional. Columns to skip, None by default.
obj2cat (bool, default True): bool, optional. Convert object columns to category, True by default.
int2uint (bool, default False): bool, optional. Convert int columns to uint, False by default.
verbose (bool, default True): bool, optional. Print memory usage info. True by default.
+
+
df = pd.DataFrame()
+df["ints64"] = np.random.randint(0,3,10)
+df['floats64'] = np.random.rand(10)
+tfm = TSShrinkDataFrame()
+df = tfm.fit_transform(df)
+test_eq(df["ints64"].dtype, "int8")
+test_eq(df["floats64"].dtype, "float32")
+
+
Initial memory usage: 288.00 B  
+Final memory usage  : 178.00 B   (-38.2%)
+
+
+
+
# test with date
+df = pd.DataFrame()
+df["dates"] = pd.date_range('1/1/2011', periods=10, freq='M').astype(str)
+df["ints64"] = np.random.randint(0,3,10)
+df['floats64'] = np.random.rand(10)
+tfm = TSShrinkDataFrame()
+df = tfm.fit_transform(df)
+test_eq(df["dates"].dtype, "datetime64[ns]")
+test_eq(df["ints64"].dtype, "int8")
+test_eq(df["floats64"].dtype, "float32")
+
+
Initial memory usage: 368.00 B  
+Final memory usage  : 258.00 B   (-29.9%)
+
+
+
+
# test with date and series
+df = pd.DataFrame()
+df["dates"] = pd.date_range('1/1/2011', periods=10, freq='M').astype(str)
+tfm = TSShrinkDataFrame()
+df = tfm.fit_transform(df["dates"])
+test_eq(df.dtype, "datetime64[ns]")
+
+
Initial memory usage: 208.00 B  
+Final memory usage  : 208.00 B   (0.0%)
+
+
+
+
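int2uint is off by default. As a minimal sketch (assuming the column only holds non-negative integers, so an unsigned conversion is valid), enabling it converts int columns to unsigned types, which can shrink them further:

df = pd.DataFrame()
df["ints64"] = np.random.randint(0, 3, 10)      # non-negative ints only
tfm = TSShrinkDataFrame(int2uint=True)           # documented argument; behavior per the table above
df = tfm.fit_transform(df)
print(df["ints64"].dtype)                        # expected: a small unsigned dtype such as uint8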

source

+
+
+

TSOneHotEncoder

+
+
 TSOneHotEncoder (columns=None, drop=True, add_na=True, dtype=<class
+                  'numpy.int8'>)
+
+

Encode categorical variables using one-hot encoding

columns: (str or List[str], optional): Column name(s) to encode. If None, all columns will be encoded. Defaults to None.
drop: (bool, optional): Whether to drop the original columns after encoding. Defaults to True.
add_na: (bool, optional): Whether to add a 'NaN' category for missing values. Defaults to True.
dtype: (type, optional): Data type of the encoded output. Defaults to np.int8.
+
+
df = pd.DataFrame()
+df["a"] = np.random.randint(0,2,10)
+df["b"] = np.random.randint(0,3,10)
+unique_cols = len(df["a"].unique()) + len(df["b"].unique())
+tfm = TSOneHotEncoder()
+tfm.fit(df)
+df = tfm.transform(df)
+test_eq(df.shape[1], unique_cols)
+
+
+

source

+
+
+

TSCategoricalEncoder

+
+
 TSCategoricalEncoder (columns=None, add_na=True, sort=True,
+                       categories='auto', inplace=True, prefix=None,
+                       suffix=None, drop=False)
+
+

A transformer to encode categorical columns

columns (NoneType, default None): List[str], optional. Columns to encode, all columns by default.
add_na (bool, default True): bool, optional. Add a NaN category, True by default.
sort (bool, default True): bool, optional. Sort categories by frequency, True by default.
categories (str, default 'auto'): dict, optional. The custom mapping of categories. 'auto' by default.
inplace (bool, default True): bool, optional. Modify input DataFrame, True by default.
prefix (NoneType, default None): str, optional. Prefix for created column names. None by default.
suffix (NoneType, default None): str, optional. Suffix for created column names. None by default.
drop (bool, default False): bool, optional. Drop original columns, False by default.
+

Stateful transforms like TSCategoricalEncoder can easily be serialized.

+
+
import joblib
+
+
+
df = pd.DataFrame()
+df["a"] = alphabet[np.random.randint(0,2,100)]
+df["b"] = ALPHABET[np.random.randint(0,3,100)]
+display(df)
+a_unique = len(df["a"].unique())
+b_unique = len(df["b"].unique())
+tfm = TSCategoricalEncoder()
+tfm.fit(df, idxs=slice(0, 50))
+joblib.dump(tfm, "data/TSCategoricalEncoder.joblib")
+tfm = joblib.load("data/TSCategoricalEncoder.joblib")
+df.loc[0, "a"] = 'z'
+df.loc[1, "a"] = 'h'
+df = tfm.transform(df)
+display(df)
+test_eq(df['a'].max(), a_unique)
+test_eq(df['b'].max(), b_unique)
+df = tfm.inverse_transform(df)
+display(df)
+
+
     a  b
0    b  B
1    b  A
2    b  C
3    a  C
4    b  C
..  .. ..
95   a  A
96   a  A
97   a  B
98   a  A
99   b  B

100 rows × 2 columns

+
+
+
+
     a  b
0    0  2
1    0  1
2    2  3
3    1  3
4    2  3
..  .. ..
95   1  1
96   1  1
97   1  2
98   1  1
99   2  2

100 rows × 2 columns

+
+
+
+
       a  b
0   #na#  B
1   #na#  A
2      b  C
3      a  C
4      b  C
..   ... ..
95     a  A
96     a  A
97     a  B
98     a  A
99     b  B

100 rows × 2 columns

+
+
+
+
+
df = pd.DataFrame()
+df["a"] = alphabet[np.random.randint(0,2,100)]
+df["a"] = df["a"].astype('category')
+df["b"] = ALPHABET[np.random.randint(0,3,100)]
+display(df)
+a_unique = len(df["a"].unique())
+b_unique = len(df["b"].unique())
+tfm = TSCategoricalEncoder()
+tfm.fit(df)
+joblib.dump(tfm, "data/TSCategoricalEncoder.joblib")
+tfm = joblib.load("data/TSCategoricalEncoder.joblib")
+df["a"] = alphabet[np.random.randint(0,5,100)]
+df["a"] = df["a"].astype('category')
+df["b"] = ALPHABET[np.random.randint(0,3,100)]
+display(df)
+df = tfm.transform(df)
+display(df)
+test_eq(df['a'].max(), a_unique)
+test_eq(df['b'].max(), b_unique)
+df = tfm.inverse_transform(df)
+display(df)
+
+
     a  b
0    b  B
1    a  C
2    b  C
3    a  C
4    b  B
..  .. ..
95   b  A
96   b  A
97   a  A
98   b  B
99   b  B

100 rows × 2 columns

+
+
+
+
     a  b
0    d  A
1    a  A
2    c  A
3    a  A
4    a  B
..  .. ..
95   c  C
96   d  B
97   c  A
98   b  B
99   e  B

100 rows × 2 columns

+
+
+
+
     a  b
0    0  1
1    1  1
2    0  1
3    1  1
4    1  2
..  .. ..
95   0  3
96   0  2
97   0  1
98   2  2
99   0  2

100 rows × 2 columns

+
+
+
+
       a  b
0   #na#  A
1      a  A
2   #na#  A
3      a  A
4      a  B
..   ... ..
95  #na#  C
96  #na#  B
97  #na#  A
98     b  B
99  #na#  B

100 rows × 2 columns

+
+
+
+
+
df = pd.DataFrame()
+df["a"] = alphabet[np.random.randint(0,2,100)]
+df["a"] = df["a"].astype('category')
+s = df['a']
+display(s)
+tfm = TSCategoricalEncoder()
+tfm.fit(s)
+joblib.dump(tfm, "data/TSCategoricalEncoder.joblib")
+tfm = joblib.load("data/TSCategoricalEncoder.joblib")
+s = tfm.transform(s)
+display(s)
+s = tfm.inverse_transform(s)
+display(s)
+
+
0     a
+1     b
+2     a
+3     a
+4     a
+     ..
+95    a
+96    a
+97    a
+98    a
+99    b
+Name: a, Length: 100, dtype: category
+Categories (2, object): ['a', 'b']
+
+
+
0     1
+1     2
+2     1
+3     1
+4     1
+     ..
+95    1
+96    1
+97    1
+98    1
+99    2
+Length: 100, dtype: int8
+
+
+
0     a
+1     b
+2     a
+3     a
+4     a
+     ..
+95    a
+96    a
+97    a
+98    a
+99    b
+Length: 100, dtype: object
+
+
+
+

source

+
+
+

TSTargetEncoder

+
+
 TSTargetEncoder (target_column, columns=None, inplace=True, prefix=None,
+                  suffix=None, drop=True, dtypes=['object', 'category'])
+
+

Mixin class for all transformers in scikit-learn.

+

If get_feature_names_out is defined, then BaseEstimator will automatically wrap transform and fit_transform to follow the set_output API. See the scikit-learn developer_api_set_output reference for details.

OneToOneFeatureMixin and ClassNamePrefixFeaturesOutMixin are helpful mixins for defining get_feature_names_out.

target_column: Column containing the target.
columns (NoneType, default None): List[str], optional. Columns to encode, all non-numerical columns by default.
inplace (bool, default True): bool, optional. Modify input DataFrame, True by default.
prefix (NoneType, default None): str, optional. Prefix for created column names. None by default.
suffix (NoneType, default None): str, optional. Suffix for created column names. None by default.
drop (bool, default True): bool, optional. Drop original columns, True by default.
dtypes (list, default ['object', 'category']): List[str]. List with dtypes that will be used to identify columns to encode if not explicitly passed.
+
+
from sklearn.model_selection import train_test_split
+
+# Create a dataframe with 100 rows
+np.random.seed(42)
+df = pd.DataFrame({
+    'category1': np.random.choice(['cat', 'dog', 'rabbit'], 100),
+    'category2': np.random.choice(['large', 'small'], 100),
+    'continuous': np.random.rand(100),
+    'target': np.random.randint(0, 2, 100)
+})
+
+display(df)
+
+# Split the data into train and test sets
+train_idx, test_idx = train_test_split(np.arange(100), test_size=0.2, random_state=42)
+print(train_idx.shape)
+
+# Initialize the encoder
+encoder = TSTargetEncoder(columns=['category1', 'category2'], target_column='target', inplace=False, suffix="te", drop=False)
+
+# Fit the encoder using the training data
+encoder.fit(df, idxs=train_idx)
+
+# Transform the whole dataframe
+df_encoded = encoder.transform(df)
+
+# Check the results
+for c in ["category1", "category2"]:
+    for v in df[c].unique():
+        assert df.loc[train_idx][df.loc[train_idx, c] == v]["target"].mean() == df_encoded[df_encoded[c] == v][f"{c}_te"].mean()
+        
+df_encoded
+
+
   category1 category2  continuous  target
0     rabbit     small    0.896091       0
1        cat     small    0.318003       1
2     rabbit     small    0.110052       1
3     rabbit     large    0.227935       0
4        cat     large    0.427108       0
..       ...       ...         ...     ...
95       cat     small    0.325400       0
96       cat     large    0.746491       0
97    rabbit     small    0.649633       1
98       cat     small    0.849223       0
99       cat     large    0.657613       1

100 rows × 4 columns

+
+
+
+
(80,)
+
+
+
   category1 category2  continuous  target  category1_te  category2_te
0     rabbit     small    0.896091       0      0.565217      0.500000
1        cat     small    0.318003       1      0.555556      0.500000
2     rabbit     small    0.110052       1      0.565217      0.500000
3     rabbit     large    0.227935       0      0.565217      0.521739
4        cat     large    0.427108       0      0.555556      0.521739
..       ...       ...         ...     ...           ...           ...
95       cat     small    0.325400       0      0.555556      0.500000
96       cat     large    0.746491       0      0.555556      0.521739
97    rabbit     small    0.649633       1      0.565217      0.500000
98       cat     small    0.849223       0      0.555556      0.500000
99       cat     large    0.657613       1      0.555556      0.521739

100 rows × 6 columns

+
+
+
+
+

source

+
+
+

TSDateTimeEncoder

+
+
 TSDateTimeEncoder (datetime_columns=None, prefix=None, drop=True,
+                    time=False, attr=['Year', 'Month', 'Week', 'Day',
+                    'Dayofweek', 'Dayofyear', 'Is_month_end',
+                    'Is_month_start', 'Is_quarter_end',
+                    'Is_quarter_start', 'Is_year_end', 'Is_year_start'])
+
+

Base class for all estimators in scikit-learn.

+

Inheriting from this class provides default implementations of:

+
  • setting and getting parameters used by GridSearchCV and friends;
  • textual and HTML representation displayed in terminals and IDEs;
  • estimator serialization;
  • parameters validation;
  • data validation;
  • feature names validation.

Read more in the scikit-learn User Guide (rolling_your_own_estimator).

+
+
import datetime as dt
+
+
+
df = pd.DataFrame()
+df.loc[0, "date"] = dt.datetime.now()
+df.loc[1, "date"] = dt.datetime.now() + pd.Timedelta(1, unit="D")
+tfm = TSDateTimeEncoder()
+joblib.dump(tfm, "data/TSDateTimeEncoder.joblib")
+tfm = joblib.load("data/TSDateTimeEncoder.joblib")
+tfm.fit_transform(df)
+
+
   _Year  _Month  _Week  _Day  _Dayofweek  _Dayofyear  _Is_month_end  _Is_month_start  _Is_quarter_end  _Is_quarter_start  _Is_year_end  _Is_year_start
0   2023       6     24    17           5         168          False            False            False              False         False           False
1   2023       6     24    18           6         169          False            False            False              False         False           False
+
+
+
+
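The attributes extracted above are date-level only. A minimal sketch, assuming time=True additionally extracts time-of-day attributes from the same datetime column (the exact added column names depend on the implementation, so they are only printed here):

df = pd.DataFrame({"date": [dt.datetime.now(), dt.datetime.now() + pd.Timedelta(1, unit="D")]})
tfm = TSDateTimeEncoder(time=True)   # time=True is a documented argument; added columns are an assumption
print(tfm.fit_transform(df).columns.tolist())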

source

+
+
+

TSDropIfTrueCols

+
+
 TSDropIfTrueCols (columns=None)
+
+

Base class for all estimators in scikit-learn.

+

Inheriting from this class provides default implementations of:

+
  • setting and getting parameters used by GridSearchCV and friends;
  • textual and HTML representation displayed in terminals and IDEs;
  • estimator serialization;
  • parameters validation;
  • data validation;
  • feature names validation.

Read more in the scikit-learn User Guide (rolling_your_own_estimator).

+
+
# test TSDropIfTrueCols
+df = pd.DataFrame()
+df["a"] = [0, 0, 1, 0, 0]
+df["b"] = [0, 0, 0, 0, 0]
+df["c"] = [0, 1, 0, 0, 1]
+
+expected_output = pd.DataFrame()
+expected_output["b"] = [0, 0, 0, 0]
+expected_output["c"] = [0, 1, 0, 1]
+
+tfm = TSDropIfTrueCols("a")
+output = tfm.fit_transform(df)
+test_eq(output, expected_output),
+
+
(None,)
+
+
+
+

source

+
+
+

TSApplyFunction

+
+
 TSApplyFunction (function, groups=None, group_keys=False, axis=1,
+                  columns=None, reset_index=False, drop=True)
+
+

Base class for all estimators in scikit-learn.

+

Inheriting from this class provides default implementations of:

+
  • setting and getting parameters used by GridSearchCV and friends;
  • textual and HTML representation displayed in terminals and IDEs;
  • estimator serialization;
  • parameters validation;
  • data validation;
  • feature names validation.

Read more in the scikit-learn User Guide (rolling_your_own_estimator).

+
+
df = pd.DataFrame()
+df["a"] = [0, 0, 1, 0, 0]
+df["b"] = [0, 0, 0, 0, 0]
+df["c"] = [0, 1, 0, 0, 1]
+
+df.apply(lambda x: 1, )
+
+
a    1
+b    1
+c    1
+dtype: int64
+
+
+
+
# test ApplyFunction without groups
+df = pd.DataFrame()
+df["a"] = [0, 0, 1, 0, 0]
+df["b"] = [0, 0, 0, 0, 0]
+df["c"] = [0, 1, 0, 0, 1]
+
+expected_output = pd.Series([1,1,1])
+
+tfm = TSApplyFunction(lambda x: 1, axis=0, reset_index=True)
+output = tfm.fit_transform(df)
+test_eq(output, expected_output)
+
+
+
# test ApplyFunction with groups and square function
+df = pd.DataFrame()
+df["a"] = [0, 1, 2, 3, 4]
+df["id"] = [0, 0, 0, 1, 1]
+
+expected_output = pd.Series([5, 25])
+
+tfm = TSApplyFunction(lambda x: (x["a"]**2).sum(), groups="id")
+
+output = tfm.fit_transform(df)
+test_eq(output, expected_output)
+
+
+

source

+
+
+

TSMissingnessEncoder

+
+
 TSMissingnessEncoder (columns=None)
+
+

Base class for all estimators in scikit-learn.

+

Inheriting from this class provides default implementations of:

+
  • setting and getting parameters used by GridSearchCV and friends;
  • textual and HTML representation displayed in terminals and IDEs;
  • estimator serialization;
  • parameters validation;
  • data validation;
  • feature names validation.

Read more in the scikit-learn User Guide (rolling_your_own_estimator).

+
+
data = np.random.rand(10,3)
+data[data > .8] = np.nan
+df = pd.DataFrame(data, columns=["a", "b", "c"])
+tfm = TSMissingnessEncoder()
+tfm.fit(df)
+joblib.dump(tfm, "data/TSMissingnessEncoder.joblib")
+tfm = joblib.load("data/TSMissingnessEncoder.joblib")
+df = tfm.transform(df)
+df
+
+
          a         b         c  a_missing  b_missing  c_missing
0       NaN       NaN       NaN          1          1          1
1  0.511342  0.501516  0.798295          0          0          0
2  0.649964  0.701967  0.795793          0          0          0
3       NaN  0.337995  0.375583          1          0          0
4  0.093982  0.578280  0.035942          0          0          0
5  0.465598  0.542645  0.286541          0          0          0
6  0.590833  0.030500  0.037348          0          0          0
7       NaN  0.360191  0.127061          1          0          0
8  0.522243  0.769994  0.215821          0          0          0
9  0.622890  0.085347  0.051682          0          0          0
+
+
+
+

source

+
+
+

TSSortByColumns

+
+
 TSSortByColumns (columns, ascending=True, inplace=True, kind='stable',
+                  na_position='last', ignore_index=False, key=None)
+
+

Transforms a dataframe by sorting by columns.

columns: Columns to sort by.
ascending (bool, default True): Ascending or descending.
inplace (bool, default True): Perform operation in place.
kind (str, default 'stable'): Type of sort to use.
na_position (str, default 'last'): Where to place NaNs.
ignore_index (bool, default False): Do not preserve index.
key (NoneType, default None): Function to apply to values before sorting.
+
+
# Test
+df = pd.DataFrame(np.random.rand(10,3), columns=["a", "b", "c"])
+df_ori = df.copy()
+tfm = TSSortByColumns(["a", "b"])
+df = tfm.fit_transform(df)
+test_eq(df_ori.sort_values(["a", "b"]).values, df.values)
+
+
+

source

+
+
+

TSSelectColumns

+
+
 TSSelectColumns (columns)
+
+

Transform used to select columns

columns: str or List[str]. Selected columns.
+
+
# Test
+df = pd.DataFrame(np.random.rand(10,3), columns=["a", "b", "c"])
+df_ori = df.copy()
+tfm = TSSelectColumns(["a", "b"])
+df = tfm.fit_transform(df)
+test_eq(df_ori[["a", "b"]].values, df.values)
+df = tfm.inverse_transform(df)
+
+
+

source

+
+
+

TSStepsSinceStart

+
+
 TSStepsSinceStart (datetime_col, datetime_unit='D', start_datetime=None,
+                    drop=False, dtype=None)
+
+

Add a column indicating the number of steps since the start in each row

+
+
# Test
+df = pd.DataFrame(np.random.rand(10,3), columns=["a", "b", "c"])
+df["datetime"] = pd.date_range("2020-01-01", periods=10)
+display(df)
+df_ori = df.copy()
+tfm = TSStepsSinceStart("datetime", datetime_unit="D", drop=True, dtype=np.int32)
+df = tfm.fit_transform(df)
+display(df)
+test_eq(df["days_since_start"].values, np.arange(10))
+df = tfm.inverse_transform(df)
+test_eq(df_ori.values, df.values)
+
+
          a         b         c   datetime
0  0.643288  0.458253  0.545617 2020-01-01
1  0.941465  0.386103  0.961191 2020-01-02
2  0.905351  0.195791  0.069361 2020-01-03
3  0.100778  0.018222  0.094443 2020-01-04
4  0.683007  0.071189  0.318976 2020-01-05
5  0.844875  0.023272  0.814468 2020-01-06
6  0.281855  0.118165  0.696737 2020-01-07
7  0.628943  0.877472  0.735071 2020-01-08
8  0.803481  0.282035  0.177440 2020-01-09
9  0.750615  0.806835  0.990505 2020-01-10
+
+
+
          a         b         c  days_since_start
0  0.643288  0.458253  0.545617                 0
1  0.941465  0.386103  0.961191                 1
2  0.905351  0.195791  0.069361                 2
3  0.100778  0.018222  0.094443                 3
4  0.683007  0.071189  0.318976                 4
5  0.844875  0.023272  0.814468                 5
6  0.281855  0.118165  0.696737                 6
7  0.628943  0.877472  0.735071                 7
8  0.803481  0.282035  0.177440                 8
9  0.750615  0.806835  0.990505                 9
+
+
+
+

source

+
+
+

TSStandardScaler

+
+
 TSStandardScaler (columns=None, mean=None, std=None, eps=1e-06)
+
+

Scale the values of specified columns in the input DataFrame to have a mean of 0 and standard deviation of 1.

columns (NoneType, default None): Column name(s) to be transformed. If None, all columns are transformed. Defaults to None.
mean (NoneType, default None): Mean value for each column. If None, the mean value of each column is calculated during the fit method. Defaults to None.
std (NoneType, default None): Stdev value for each column. If None, the standard deviation value of each column is calculated during the fit method. Defaults to None.
eps (float, default 1e-06): A small value to avoid division by zero. Defaults to 1e-6.
+
+
# Test
+df = pd.DataFrame(np.random.rand(100,3), columns=["a", "b", "c"])
+tfm = TSStandardScaler()
+df = tfm.fit_transform(df)
+test_close(df.mean().values, np.zeros(3), 1e-3)
+test_close(df.std().values, np.ones(3), 1e-3)
+
+
+
# Test
+df = pd.DataFrame(np.random.rand(1000,3), columns=["a", "b", "c"])
+tfm = TSStandardScaler()
+df = tfm.fit_transform(df, idxs=slice(0, 800))
+test_close(df.mean().values, np.zeros(3), 1e-1)
+test_close(df.std().values, np.ones(3), 1e-1)
+
+
+

source

+
+
+

TSRobustScaler

+
+
 TSRobustScaler (columns=None, quantile_range=(25.0, 75.0), eps=1e-06)
+
+

This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range)

+
+
# test RobustScaler
+df = pd.DataFrame(np.random.rand(100,3), columns=["a", "b", "c"])
+df["a"] = df["a"] * 100
+df["b"] = df["b"] * 10
+tfm = TSRobustScaler()
+df = tfm.fit_transform(df)
+test_close(df.median().values, np.zeros(3), 1e-3)
+
+
+

source

+
+
+

TSAddMissingTimestamps

+
+
 TSAddMissingTimestamps (datetime_col=None, use_index=False,
+                         unique_id_cols=None, fill_value=nan,
+                         range_by_group=True, start_date=None,
+                         end_date=None, freq=None)
+
+

Mixin class for all transformers in scikit-learn.

+

If get_feature_names_out is defined, then BaseEstimator will automatically wrap transform and fit_transform to follow the set_output API. See the scikit-learn developer_api_set_output reference for details.

+

OneToOneFeatureMixin and ClassNamePrefixFeaturesOutMixin are helpful mixins for defining get_feature_names_out.

+
+
# Test
+df = pd.DataFrame(np.random.rand(10,3), columns=["a", "b", "c"])
+df["datetime"] = pd.date_range("2020-01-01", periods=10)
+df = df.iloc[[0, 2, 3, 5, 6, 8, 9]]
+display(df)
+tfm = TSAddMissingTimestamps(datetime_col="datetime", freq="D")
+df = tfm.fit_transform(df)
+display(df)
+test_eq(df.shape[0], 10)
+
+
          a         b         c   datetime
0  0.211126  0.752468  0.051294 2020-01-01
2  0.394572  0.529941  0.161367 2020-01-03
3  0.571996  0.805432  0.760161 2020-01-04
5  0.361075  0.408456  0.679697 2020-01-06
6  0.056680  0.034673  0.391911 2020-01-07
8  0.259828  0.886086  0.895690 2020-01-09
9  0.297287  0.229994  0.411304 2020-01-10
+
+
+
    datetime         a         b         c
0 2020-01-01  0.211126  0.752468  0.051294
1 2020-01-02       NaN       NaN       NaN
2 2020-01-03  0.394572  0.529941  0.161367
3 2020-01-04  0.571996  0.805432  0.760161
4 2020-01-05       NaN       NaN       NaN
5 2020-01-06  0.361075  0.408456  0.679697
6 2020-01-07  0.056680  0.034673  0.391911
7 2020-01-08       NaN       NaN       NaN
8 2020-01-09  0.259828  0.886086  0.895690
9 2020-01-10  0.297287  0.229994  0.411304
+
+
+
+
# Test
+# Filling dates between min and max dates for each value in groupby column
+dates = pd.date_range('2021-05-01', '2021-05-07').values
+dates = np.concatenate((dates, dates))
+data = np.zeros((len(dates), 4))
+data[:, 0] = dates
+data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
+data[:, 2] = np.random.rand(len(dates))
+data[:, 3] = np.random.rand(len(dates))
+cols = ['date', 'id', 'feature1', 'feature2']
+date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
+date_df_with_missing_dates = date_df.drop([0,1,3,8,11,13]).reset_index(drop=True)
+display(date_df_with_missing_dates)
+tfm = TSAddMissingTimestamps(datetime_col="date", unique_id_cols="id", freq="D")
+df = tfm.fit_transform(date_df_with_missing_dates.copy())
+display(df)
+
+
        date  id  feature1  feature2
0 2021-05-03   0  0.826065  0.793818
1 2021-05-05   0  0.824350  0.577807
2 2021-05-06   0  0.396992  0.866102
3 2021-05-07   0  0.156317  0.289440
4 2021-05-01   1  0.737951  0.467681
5 2021-05-03   1  0.671271  0.411190
6 2021-05-04   1  0.270644  0.427486
7 2021-05-06   1  0.992582  0.564232
+
+
+
          date  id  feature1  feature2
0   2021-05-03   0  0.826065  0.793818
1   2021-05-04   0       NaN       NaN
2   2021-05-05   0  0.824350  0.577807
3   2021-05-06   0  0.396992  0.866102
4   2021-05-07   0  0.156317  0.289440
5   2021-05-01   1  0.737951  0.467681
6   2021-05-02   1       NaN       NaN
7   2021-05-03   1  0.671271  0.411190
8   2021-05-04   1  0.270644  0.427486
9   2021-05-05   1       NaN       NaN
10  2021-05-06   1  0.992582  0.564232
+
+
+
+
# Test
+display(date_df_with_missing_dates)
+tfm = TSAddMissingTimestamps(datetime_col="date", unique_id_cols="id", freq="D", range_by_group=False)
+df = tfm.fit_transform(date_df_with_missing_dates.copy())
+display(df)
+
+
        date  id  feature1  feature2
0 2021-05-03   0  0.826065  0.793818
1 2021-05-05   0  0.824350  0.577807
2 2021-05-06   0  0.396992  0.866102
3 2021-05-07   0  0.156317  0.289440
4 2021-05-01   1  0.737951  0.467681
5 2021-05-03   1  0.671271  0.411190
6 2021-05-04   1  0.270644  0.427486
7 2021-05-06   1  0.992582  0.564232
+
+
+
          date  id  feature1  feature2
0   2021-05-01   0       NaN       NaN
1   2021-05-02   0       NaN       NaN
2   2021-05-03   0  0.826065  0.793818
3   2021-05-04   0       NaN       NaN
4   2021-05-05   0  0.824350  0.577807
5   2021-05-06   0  0.396992  0.866102
6   2021-05-07   0  0.156317  0.289440
7   2021-05-01   1  0.737951  0.467681
8   2021-05-02   1       NaN       NaN
9   2021-05-03   1  0.671271  0.411190
10  2021-05-04   1  0.270644  0.427486
11  2021-05-05   1       NaN       NaN
12  2021-05-06   1  0.992582  0.564232
13  2021-05-07   1       NaN       NaN
+
+
+
+

source

+
+
+

TSDropDuplicates

+
+
 TSDropDuplicates (datetime_col=None, use_index=False,
+                   unique_id_cols=None, keep='last', reset_index=False)
+
+

Drop rows with duplicated values in a set of columns, optionally including a datetime column or index

datetime_col: (str or List[str], optional): Name(s) of column(s) containing datetime values. If None, the index is used if use_index=True.
use_index: (bool, optional): Whether to include the index in the set of columns for checking duplicates. Defaults to False.
unique_id_cols: (str or List[str], optional): Name(s) of column(s) to be included in the set of columns for checking duplicates. Defaults to None.
keep: (str, optional): Which duplicated values to keep. Choose from {'first', 'last', False}. Defaults to 'last'.
reset_index: (bool, optional): Whether to reset the index after dropping duplicates. Ignored if use_index=False. Defaults to False.
+
+
# Test
+df = pd.DataFrame(np.random.rand(10,3), columns=["a", "b", "c"])
+df["datetime"] = pd.date_range("2020-01-01", periods=10)
+df['user_id'] = np.sort(np.random.randint(0, 2, 10))
+df = df.iloc[[0, 2, 2, 3, 5, 6, 6, 8, 9]]
+df.reset_index(drop=True, inplace=True)
+display(df)
+tfm = TSDropDuplicates(datetime_col="datetime", unique_id_cols="a")
+df = tfm.fit_transform(df)
+display(df)
+
+
          a         b         c   datetime  user_id
0  0.201528  0.934433  0.689088 2020-01-01        0
1  0.016200  0.818380  0.040139 2020-01-03        0
2  0.016200  0.818380  0.040139 2020-01-03        0
3  0.889913  0.991963  0.294067 2020-01-04        0
4  0.865562  0.102843  0.125955 2020-01-06        1
5  0.979152  0.673839  0.846887 2020-01-07        1
6  0.979152  0.673839  0.846887 2020-01-07        1
7  0.603150  0.682532  0.575359 2020-01-09        1
8  0.429062  0.275923  0.768581 2020-01-10        1
+
+
+
          a         b         c   datetime  user_id
0  0.201528  0.934433  0.689088 2020-01-01        0
2  0.016200  0.818380  0.040139 2020-01-03        0
3  0.889913  0.991963  0.294067 2020-01-04        0
4  0.865562  0.102843  0.125955 2020-01-06        1
6  0.979152  0.673839  0.846887 2020-01-07        1
7  0.603150  0.682532  0.575359 2020-01-09        1
8  0.429062  0.275923  0.768581 2020-01-10        1
+
+
+
+

source

+
+
+

TSFillMissing

+
+
 TSFillMissing (columns=None, unique_id_cols=None, method='ffill',
+                value=0)
+
+

Fill missing values in specified columns using the specified method and/or value.

columns: (str or List[str], optional): Column name(s) to be transformed. If None, all columns are transformed. Defaults to None.
unique_id_cols: (str or List[str], optional): Col name(s) with unique ids for each row. If None, uses all rows at once. Defaults to None.
method: (str, optional): The method to use for filling missing values, e.g. 'ffill', 'bfill'. If None, value is used. Defaults to 'ffill'.
value: (scalar or dict or Series, optional): The value to use for filling missing values. If None, method is used. Defaults to 0.
+
+
# Test
+df = pd.DataFrame(np.random.rand(20,3), columns=["a", "b", "c"])
+df.loc[np.random.rand(20) > .5, 'a'] = np.nan
+df["datetime"] = pd.date_range("2020-01-01", periods=20)
+df['user_id'] = np.sort(np.random.randint(0, 2, 20))
+df = df.iloc[[0, 2, 2, 3, 5, 6, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]]
+df.reset_index(drop=True, inplace=True)
+display(df)
+tfm = TSFillMissing(columns="a", method="ffill", value=0)
+df = tfm.fit_transform(df)
+display(df)
+test_eq(df['a'].isna().sum(), 0)
+
+
           a         b         c   datetime  user_id
0        NaN  0.059943  0.130974 2020-01-01        0
1   0.734151  0.341319  0.478528 2020-01-03        0
2   0.734151  0.341319  0.478528 2020-01-03        0
3   0.928860  0.331972  0.465337 2020-01-04        0
4        NaN  0.631375  0.426398 2020-01-06        0
5   0.548145  0.174647  0.295932 2020-01-07        0
6   0.548145  0.174647  0.295932 2020-01-07        0
7        NaN  0.576881  0.563920 2020-01-09        0
8   0.500279  0.069394  0.089877 2020-01-10        0
9   0.600912  0.340959  0.917268 2020-01-11        0
10  0.406591  0.143281  0.714719 2020-01-12        0
11       NaN  0.525470  0.697833 2020-01-13        1
12       NaN  0.792191  0.676361 2020-01-14        1
13       NaN  0.945925  0.295824 2020-01-15        1
14       NaN  0.271955  0.217891 2020-01-16        1
15       NaN  0.633712  0.593461 2020-01-17        1
16  0.016243  0.728778  0.323530 2020-01-18        1
17       NaN  0.556578  0.342731 2020-01-19        1
18  0.134576  0.094419  0.831518 2020-01-20        1
+
+
+
           a         b         c   datetime  user_id
0   0.000000  0.059943  0.130974 2020-01-01        0
1   0.734151  0.341319  0.478528 2020-01-03        0
2   0.734151  0.341319  0.478528 2020-01-03        0
3   0.928860  0.331972  0.465337 2020-01-04        0
4   0.928860  0.631375  0.426398 2020-01-06        0
5   0.548145  0.174647  0.295932 2020-01-07        0
6   0.548145  0.174647  0.295932 2020-01-07        0
7   0.548145  0.576881  0.563920 2020-01-09        0
8   0.500279  0.069394  0.089877 2020-01-10        0
9   0.600912  0.340959  0.917268 2020-01-11        0
10  0.406591  0.143281  0.714719 2020-01-12        0
11  0.406591  0.525470  0.697833 2020-01-13        1
12  0.406591  0.792191  0.676361 2020-01-14        1
13  0.406591  0.945925  0.295824 2020-01-15        1
14  0.406591  0.271955  0.217891 2020-01-16        1
15  0.406591  0.633712  0.593461 2020-01-17        1
16  0.016243  0.728778  0.323530 2020-01-18        1
17  0.016243  0.556578  0.342731 2020-01-19        1
18  0.134576  0.094419  0.831518 2020-01-20        1
+
+
+
+

source

+
+
+

TSMissingnessEncoder

+
+
 TSMissingnessEncoder (columns=None)
+
+

Base class for all estimators in scikit-learn.

+

Inheriting from this class provides default implementations of:

+
  • setting and getting parameters used by GridSearchCV and friends;
  • textual and HTML representation displayed in terminals and IDEs;
  • estimator serialization;
  • parameters validation;
  • data validation;
  • feature names validation.

Read more in the scikit-learn User Guide (rolling_your_own_estimator).

+
+
# Test
+df = pd.DataFrame(np.random.rand(20,3), columns=["a", "b", "c"])
+df.loc[np.random.rand(20) > .5, 'a'] = np.nan
+df["datetime"] = pd.date_range("2020-01-01", periods=20)
+df['user_id'] = np.sort(np.random.randint(0, 2, 20))
+df = df.iloc[[0, 2, 2, 3, 5, 6, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]]
+df.reset_index(drop=True, inplace=True)
+display(df)
+tfm = TSMissingnessEncoder(columns="a")
+df = tfm.fit_transform(df)
+display(df)
+
+
           a         b         c   datetime  user_id
0   0.873619  0.995569  0.582714 2020-01-01        0
1   0.402704  0.672507  0.682192 2020-01-03        0
2   0.402704  0.672507  0.682192 2020-01-03        0
3        NaN  0.133210  0.632396 2020-01-04        0
4   0.700611  0.753472  0.872859 2020-01-06        0
5        NaN  0.730249  0.619173 2020-01-07        0
6        NaN  0.730249  0.619173 2020-01-07        0
7        NaN  0.617106  0.849959 2020-01-09        0
8   0.196246  0.125550  0.963480 2020-01-10        1
9   0.108045  0.478491  0.585564 2020-01-11        1
10       NaN  0.086032  0.057027 2020-01-12        1
11  0.105483  0.585588  0.544345 2020-01-13        1
12  0.233741  0.637774  0.820068 2020-01-14        1
13       NaN  0.498130  0.689310 2020-01-15        1
14       NaN  0.307771  0.613638 2020-01-16        1
15  0.897935  0.809924  0.583130 2020-01-17        1
16  0.730222  0.364822  0.640966 2020-01-18        1
17  0.466182  0.189936  0.701738 2020-01-19        1
18       NaN  0.358622  0.911339 2020-01-20        1
+
+
+
           a         b         c   datetime  user_id  a_missing
0   0.873619  0.995569  0.582714 2020-01-01        0          0
1   0.402704  0.672507  0.682192 2020-01-03        0          0
2   0.402704  0.672507  0.682192 2020-01-03        0          0
3        NaN  0.133210  0.632396 2020-01-04        0          1
4   0.700611  0.753472  0.872859 2020-01-06        0          0
5        NaN  0.730249  0.619173 2020-01-07        0          1
6        NaN  0.730249  0.619173 2020-01-07        0          1
7        NaN  0.617106  0.849959 2020-01-09        0          1
8   0.196246  0.125550  0.963480 2020-01-10        1          0
9   0.108045  0.478491  0.585564 2020-01-11        1          0
10       NaN  0.086032  0.057027 2020-01-12        1          1
11  0.105483  0.585588  0.544345 2020-01-13        1          0
12  0.233741  0.637774  0.820068 2020-01-14        1          0
13       NaN  0.498130  0.689310 2020-01-15        1          1
14       NaN  0.307771  0.613638 2020-01-16        1          1
15  0.897935  0.809924  0.583130 2020-01-17        1          0
16  0.730222  0.364822  0.640966 2020-01-18        1          0
17  0.466182  0.189936  0.701738 2020-01-19        1          0
18       NaN  0.358622  0.911339 2020-01-20        1          1
+
+
+

With these sklearn preprocessing API transforms it’s possible to build data preprocessing pipelines like this one:

+
from sklearn.pipeline import Pipeline
+
+cont_cols = ['cont_0', 'cont_1', 'cont_2', 'cont_3', 'cont_4', 'cont_5']
+pipe = Pipeline([
+    ('shrinker', TSShrinkDataFrame()), 
+    ('drop_duplicates', TSDropDuplicates('date', unique_id_cols='user_id')),
+    ('add_mts', TSAddMissingTimestamps(datetime_col='date', unique_id_cols='user_id', freq='D', range_by_group=False)),
+    ('onehot_encoder', TSOneHotEncoder(['cat_0'])),
+    ('cat_encoder', TSCategoricalEncoder(['user_id', 'cat_1'])),
+    ('steps_since_start', TSStepsSinceStart('date', datetime_unit='D', start_datetime='2017-01-01', dtype=np.int32)),
+    ('missing_encoder', TSMissingnessEncoder(['cont_1'])),
+    ('fill_missing', TSFillMissing(cont_cols, unique_id_cols='user_id', value=0)),
+    ], 
+    verbose=True)
+df = pipe.fit_transform(df)
+
+
+
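Because every step is a stateful sklearn-style transform, the fitted pipeline itself can be serialized and re-applied to new data at inference time. A minimal sketch (the file path is just an example):

import joblib

joblib.dump(pipe, "data/preproc_pipe.joblib")   # persist the fitted preprocessing pipeline
pipe = joblib.load("data/preproc_pipe.joblib")  # restore it later and call pipe.transform(new_df)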

y transforms

+
+

source

+
+

Preprocessor

+
+
 Preprocessor (preprocessor, **kwargs)
+
+

Applies a scikit-learn-style preprocessor (e.g. StandardScaler, RobustScaler, Normalizer, BoxCox, YeoJohnshon or Quantile) to a 1d target array, exposing fit, transform and inverse_transform.

+
+
# Standardize
+from tsai.data.validation import TimeSplitter
+
+
+
y = random_shuffle(np.random.randn(1000) * 10 + 5)
+splits = TimeSplitter()(y)
+preprocessor = Preprocessor(StandardScaler)
+preprocessor.fit(y[splits[0]])
+y_tfm = preprocessor.transform(y)
+test_close(preprocessor.inverse_transform(y_tfm), y)
+plt.hist(y, 50, label='ori',)
+plt.hist(y_tfm, 50, label='tfm')
+plt.legend(loc='best')
+plt.show()
+
+
[figure: histograms of y before ('ori') and after ('tfm') standardization]
# RobustScaler
+y = random_shuffle(np.random.randn(1000) * 10 + 5)
+splits = TimeSplitter()(y)
+preprocessor = Preprocessor(RobustScaler)
+preprocessor.fit(y[splits[0]])
+y_tfm = preprocessor.transform(y)
+test_close(preprocessor.inverse_transform(y_tfm), y)
+plt.hist(y, 50, label='ori',)
+plt.hist(y_tfm, 50, label='tfm')
+plt.legend(loc='best')
+plt.show()
+
+
[figure: histograms of y before ('ori') and after ('tfm') robust scaling]
# Normalize
+y = random_shuffle(np.random.rand(1000) * 3 + .5)
+splits = TimeSplitter()(y)
+preprocessor = Preprocessor(Normalizer)
+preprocessor.fit(y[splits[0]])
+y_tfm = preprocessor.transform(y)
+test_close(preprocessor.inverse_transform(y_tfm), y)
+plt.hist(y, 50, label='ori',)
+plt.hist(y_tfm, 50, label='tfm')
+plt.legend(loc='best')
+plt.show()
+
+
[figure: histograms of y before ('ori') and after ('tfm') normalization]
# BoxCox
+y = random_shuffle(np.random.rand(1000) * 10 + 5)
+splits = TimeSplitter()(y)
+preprocessor = Preprocessor(BoxCox)
+preprocessor.fit(y[splits[0]])
+y_tfm = preprocessor.transform(y)
+test_close(preprocessor.inverse_transform(y_tfm), y)
+plt.hist(y, 50, label='ori',)
+plt.hist(y_tfm, 50, label='tfm')
+plt.legend(loc='best')
+plt.show()
+
+
[figure: histograms of y before ('ori') and after ('tfm') the BoxCox transform]
# YeoJohnshon
+y = random_shuffle(np.random.randn(1000) * 10 + 5)
+y = np.random.beta(.5, .5, size=1000)
+splits = TimeSplitter()(y)
+preprocessor = Preprocessor(YeoJohnshon)
+preprocessor.fit(y[splits[0]])
+y_tfm = preprocessor.transform(y)
+test_close(preprocessor.inverse_transform(y_tfm), y)
+plt.hist(y, 50, label='ori',)
+plt.hist(y_tfm, 50, label='tfm')
+plt.legend(loc='best')
+plt.show()
+
+
[figure: histograms of y before ('ori') and after ('tfm') the YeoJohnshon transform]
# QuantileTransformer
+y = - np.random.beta(1, .5, 10000) * 10
+splits = TimeSplitter()(y)
+preprocessor = Preprocessor(Quantile)
+preprocessor.fit(y[splits[0]])
+plt.hist(y, 50, label='ori',)
+y_tfm = preprocessor.transform(y)
+plt.legend(loc='best')
+plt.show()
+plt.hist(y_tfm, 50, label='tfm')
+plt.legend(loc='best')
+plt.show()
+test_close(preprocessor.inverse_transform(y_tfm), y, 1e-1)
+
+
[figure: histogram of the original y]
[figure: histogram of the quantile-transformed y]
+

source

+
+
+

ReLabeler

+
+
 ReLabeler (cm)
+
+

Changes the labels in a dataset based on a dictionary (class mapping). Args: cm = class mapping dictionary.

+
+
vals = {0:'a', 1:'b', 2:'c', 3:'d', 4:'e'}
+y = np.array([vals[i] for i in np.random.randint(0, 5, 20)])
+labeler = ReLabeler(dict(a='x', b='x', c='y', d='z', e='z'))
+y_new = labeler(y)
+test_eq(y.shape, y_new.shape)
+y, y_new
+
+
(array(['d', 'd', 'a', 'd', 'b', 'e', 'a', 'd', 'b', 'c', 'b', 'e', 'b',
+        'b', 'a', 'e', 'd', 'e', 'c', 'e'], dtype='<U1'),
+ array(['z', 'z', 'x', 'z', 'x', 'z', 'x', 'z', 'x', 'y', 'x', 'z', 'x',
+        'x', 'x', 'z', 'z', 'z', 'y', 'z'], dtype='<U1'))
+
+
\ No newline at end of file
diff --git a/data.tabular.html b/data.tabular.html new file mode 100644 index 000000000..cc4d527c6 --- /dev/null +++ b/data.tabular.html @@ -0,0 +1,2259 @@
+tsai - Time Series Tabular Data

Time Series Tabular Data

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Main tabular functions used throughout the library. This is helpful when you have additional data associated with your time series, such as metadata or extracted time series features.

+
+
+

source

+
+

get_tabular_ds

+
+
 get_tabular_ds (df, procs=[<class 'fastai.tabular.core.Categorify'>,
+                 <class 'fastai.tabular.core.FillMissing'>, <class
+                 'fastai.data.transforms.Normalize'>], cat_names=None,
+                 cont_names=None, y_names=None, groupby=None,
+                 y_block=None, splits=None, do_setup=True, inplace=False,
+                 reduce_memory=True, device=None)
+
+
+
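get_tabular_ds builds the underlying tabular datasets rather than DataLoaders. A minimal sketch, assuming it accepts the same kind of arguments as get_tabular_dls below and that the usual tsai/fastai imports are available (the ADULT sample is reused purely for illustration):

path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path/'adult.csv')
splits = RandomSplitter()(range_of(df))
to = get_tabular_ds(df, cat_names=['workclass', 'education'], cont_names=['age', 'fnlwgt'],
                    y_names='salary', splits=splits)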

source

+
+
+

get_tabular_dls

+
+
 get_tabular_dls (df, procs=[<class 'fastai.tabular.core.Categorify'>,
+                  <class 'fastai.tabular.core.FillMissing'>, <class
+                  'fastai.data.transforms.Normalize'>], cat_names=None,
+                  cont_names=None, y_names=None, bs=64, y_block=None,
+                  splits=None, do_setup=True, inplace=False,
+                  reduce_memory=True, device=None,
+                  path:Union[str,pathlib.Path]='.')
+
+
+

source

+
+
+

preprocess_df

+
+
 preprocess_df (df, procs=[<class 'fastai.tabular.core.Categorify'>,
+                <class 'fastai.tabular.core.FillMissing'>, <class
+                'fastai.data.transforms.Normalize'>], cat_names=None,
+                cont_names=None, y_names=None, sample_col=None,
+                reduce_memory=True)
+
+
+
path = untar_data(URLs.ADULT_SAMPLE)
+df = pd.read_csv(path/'adult.csv')
+# df['salary'] = np.random.rand(len(df)) # uncomment to simulate a cont dependent variable
+
+cat_names = ['workclass', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex',
+             'capital-gain', 'capital-loss', 'native-country']
+cont_names = ['age', 'fnlwgt', 'hours-per-week']
+target = ['salary']
+splits = RandomSplitter()(range_of(df))
+
+dls = get_tabular_dls(df, cat_names=cat_names, cont_names=cont_names, y_names='salary', splits=splits, bs=512, device=device)
+dls.show_batch()
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
workclasseducationeducation-nummarital-statusoccupationrelationshipracesexcapital-gaincapital-lossnative-countryagefnlwgthours-per-weeksalary
0PrivateSome-college10.0DivorcedExec-managerialNot-in-familyWhiteMale00United-States48.000000190072.00000550.000000>=50k
1Self-emp-not-incSome-college10.0Married-civ-spouseSalesHusbandWhiteMale00United-States72.000001284120.00296440.000000<50k
2PrivateSome-college10.0Married-civ-spouseProtective-servHusbandBlackMale00United-States72.00000153684.00249740.000000<50k
3Self-emp-incSome-college10.0Married-civ-spouseFarming-fishingHusbandWhiteMale00United-States47.000000337049.99887540.000000<50k
4PrivateHS-grad9.0DivorcedCraft-repairNot-in-familyWhiteMale00United-States46.000000207677.00070730.000000<50k
5Private5th-6th3.0DivorcedPriv-house-servUnmarriedWhiteFemale00Mexico45.000000265082.99914235.000000<50k
6PrivateAssoc-acdm12.0Never-marriedOther-serviceNot-in-familyWhiteFemale00United-States28.000000150296.00132879.999999<50k
7PrivateHS-grad9.0Married-civ-spouseExec-managerialHusbandWhiteMale00United-States50.00000094080.99935340.000000>=50k
8PrivateAssoc-voc11.0Married-civ-spouseExec-managerialHusbandWhiteMale00Germany58.000000235624.00030240.000000>=50k
9PrivateHS-grad9.0Never-marriedOther-serviceUnmarriedBlackFemale00Japan29.000000419721.00899640.000000<50k
+
+
+
+
metrics = mae if dls.c == 1 else accuracy
+learn = tabular_learner(dls, layers=[200, 100], y_range=None, metrics=metrics)
+learn.fit(1, 1e-2)
+
epoch  train_loss  valid_loss  accuracy  time
0      0.349525    0.288922    0.866093  00:05
+
+
+
+
learn.dls.one_batch()
+
+
(tensor([[  5,  12,   9,  ...,   1,   1,  21],
+         [  1,  10,  13,  ...,   1,   1,   3],
+         [  5,   4,   2,  ...,   1,   1,   6],
+         ...,
+         [  5,   6,   4,  ...,   1,   1,  40],
+         [  3,  10,  13,  ...,   1,   1,  40],
+         [  5,  12,   9,  ..., 116,   1,  40]]),
+ tensor([[-0.2593,  0.1234,  1.1829],
+         [-0.9913, -1.4041, -0.0347],
+         [-0.1129,  0.4583, -0.0347],
+         ...,
+         [-1.5769, -0.1989,  0.3712],
+         [ 0.4727, -1.4400,  0.3712],
+         [ 1.5708, -0.2222, -0.0347]]),
+ tensor([[1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [1],
+         [0],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [1],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [1],
+         [0],
+         [1],
+         [1],
+         [0],
+         [1],
+         [1],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [1],
+         [1],
+         [0],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [1],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [0],
+         [0],
+         [1],
+         [0],
+         [0],
+         [1],
+         [1]], dtype=torch.int8))
+
+
+
+
learn.model
+
+
TabularModel(
+  (embeds): ModuleList(
+    (0): Embedding(10, 6)
+    (1): Embedding(17, 8)
+    (2): Embedding(17, 8)
+    (3): Embedding(8, 5)
+    (4): Embedding(16, 8)
+    (5): Embedding(7, 5)
+    (6): Embedding(6, 4)
+    (7): Embedding(3, 3)
+    (8): Embedding(117, 23)
+    (9): Embedding(90, 20)
+    (10): Embedding(43, 13)
+  )
+  (emb_drop): Dropout(p=0.0, inplace=False)
+  (bn_cont): BatchNorm1d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (layers): Sequential(
+    (0): LinBnDrop(
+      (0): Linear(in_features=106, out_features=200, bias=False)
+      (1): ReLU(inplace=True)
+      (2): BatchNorm1d(200, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (1): LinBnDrop(
+      (0): Linear(in_features=200, out_features=100, bias=False)
+      (1): ReLU(inplace=True)
+      (2): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (2): LinBnDrop(
+      (0): Linear(in_features=100, out_features=2, bias=True)
+    )
+  )
+)
+
+
+
+
path = untar_data(URLs.ADULT_SAMPLE)
+df = pd.read_csv(path/'adult.csv')
+cat_names = ['workclass', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex',
+             'capital-gain', 'capital-loss', 'native-country']
+cont_names = ['age', 'fnlwgt', 'hours-per-week']
+target = ['salary']
+df, procs = preprocess_df(df, procs=[Categorify, FillMissing, Normalize], cat_names=cat_names, cont_names=cont_names, y_names=target, 
+                          sample_col=None, reduce_memory=True)
+df.head()
+
+
(table) df.head() — first 5 rows of the preprocessed dataframe, with columns: workclass, education, education-num, marital-status, occupation, relationship, race, sex, capital-gain, capital-loss, native-country, age, fnlwgt, hours-per-week, salary. Categorical columns are shown as integer codes and the continuous columns (age, fnlwgt, hours-per-week) as normalized values.
+
+
+
+
procs.classes, procs.means, procs.stds
+
+
({'workclass': ['#na#', ' ?', ' Federal-gov', ' Local-gov', ' Never-worked', ' Private', ' Self-emp-inc', ' Self-emp-not-inc', ' State-gov', ' Without-pay'],
+  'education': ['#na#', ' 10th', ' 11th', ' 12th', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th', ' Assoc-acdm', ' Assoc-voc', ' Bachelors', ' Doctorate', ' HS-grad', ' Masters', ' Preschool', ' Prof-school', ' Some-college'],
+  'education-num': ['#na#', 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0],
+  'marital-status': ['#na#', ' Divorced', ' Married-AF-spouse', ' Married-civ-spouse', ' Married-spouse-absent', ' Never-married', ' Separated', ' Widowed'],
+  'occupation': ['#na#', ' ?', ' Adm-clerical', ' Armed-Forces', ' Craft-repair', ' Exec-managerial', ' Farming-fishing', ' Handlers-cleaners', ' Machine-op-inspct', ' Other-service', ' Priv-house-serv', ' Prof-specialty', ' Protective-serv', ' Sales', ' Tech-support', ' Transport-moving'],
+  'relationship': ['#na#', ' Husband', ' Not-in-family', ' Other-relative', ' Own-child', ' Unmarried', ' Wife'],
+  'race': ['#na#', ' Amer-Indian-Eskimo', ' Asian-Pac-Islander', ' Black', ' Other', ' White'],
+  'sex': ['#na#', ' Female', ' Male'],
+  'capital-gain': ['#na#', 0, 114, 401, 594, 914, 991, 1055, 1086, 1111, 1151, 1173, 1409, 1424, 1455, 1471, 1506, 1639, 1797, 1831, 1848, 2009, 2036, 2050, 2062, 2105, 2174, 2176, 2202, 2228, 2290, 2329, 2346, 2354, 2387, 2407, 2414, 2463, 2538, 2580, 2597, 2635, 2653, 2829, 2885, 2907, 2936, 2961, 2964, 2977, 2993, 3103, 3137, 3273, 3325, 3411, 3418, 3432, 3456, 3464, 3471, 3674, 3781, 3818, 3887, 3908, 3942, 4064, 4101, 4386, 4416, 4508, 4650, 4687, 4787, 4865, 4931, 4934, 5013, 5060, 5178, 5455, 5556, 5721, 6097, 6360, 6418, 6497, 6514, 6723, 6767, 6849, 7298, 7430, 7443, 7688, 7896, 7978, 8614, 9386, 9562, 10520, 10566, 10605, 11678, 13550, 14084, 14344, 15020, 15024, 15831, 18481, 20051, 22040, 25124, 25236, 27828, 34095, 41310, 99999],
+  'capital-loss': ['#na#', 0, 155, 213, 323, 419, 625, 653, 810, 880, 974, 1092, 1138, 1258, 1340, 1380, 1408, 1411, 1485, 1504, 1539, 1564, 1573, 1579, 1590, 1594, 1602, 1617, 1628, 1648, 1651, 1668, 1669, 1672, 1719, 1721, 1726, 1735, 1740, 1741, 1755, 1762, 1816, 1825, 1844, 1848, 1876, 1887, 1902, 1944, 1974, 1977, 1980, 2001, 2002, 2042, 2051, 2057, 2080, 2129, 2149, 2163, 2174, 2179, 2201, 2205, 2206, 2231, 2238, 2246, 2258, 2267, 2282, 2339, 2352, 2377, 2392, 2415, 2444, 2457, 2467, 2472, 2489, 2547, 2559, 2603, 2754, 2824, 3004, 3683, 3770, 3900, 4356],
+  'native-country': ['#na#', ' ?', ' Cambodia', ' Canada', ' China', ' Columbia', ' Cuba', ' Dominican-Republic', ' Ecuador', ' El-Salvador', ' England', ' France', ' Germany', ' Greece', ' Guatemala', ' Haiti', ' Holand-Netherlands', ' Honduras', ' Hong', ' Hungary', ' India', ' Iran', ' Ireland', ' Italy', ' Jamaica', ' Japan', ' Laos', ' Mexico', ' Nicaragua', ' Outlying-US(Guam-USVI-etc)', ' Peru', ' Philippines', ' Poland', ' Portugal', ' Puerto-Rico', ' Scotland', ' South', ' Taiwan', ' Thailand', ' Trinadad&Tobago', ' United-States', ' Vietnam', ' Yugoslavia']},
+ {'age': 38.58164675532078,
+  'fnlwgt': 189778.36651208502,
+  'hours-per-week': 40.437455852092995},
+ {'age': 13.640223192304274,
+  'fnlwgt': 105548.3568809908,
+  'hours-per-week': 12.347239175707989})
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/data.transforms.html b/data.transforms.html new file mode 100644 index 000000000..3c2d26082 --- /dev/null +++ b/data.transforms.html @@ -0,0 +1,2187 @@ + + + + + + + + + +tsai - Time Series Data Augmentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Time Series Data Augmentation

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Functions used to transform TSTensors (Data Augmentation)

+
+
+
from tsai.data.core import TSCategorize
+from tsai.data.external import get_UCR_data
+from tsai.data.preprocessing import TSStandardize
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, return_split=False)
+tfms = [None, TSCategorize()]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms, bs=128)
+xb, yb = next(iter(dls.train))
+
+
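All of the transforms documented below operate on TSTensor batches and are typically applied as batch_tfms so that they only affect the training set (split_idx=0). A minimal, hedged sketch of combining a couple of them with TSStandardize when building the dataloaders (the chosen transforms and magnitudes are illustrative assumptions, not recommendations):

# Hedged sketch: add augmentation tfms to batch_tfms (transforms/magnitudes are illustrative)
aug_batch_tfms = [TSStandardize(), TSMagAddNoise(.1), TSTimeWarp(.1)]
aug_dls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=aug_batch_tfms, bs=128)
aug_xb, aug_yb = next(iter(aug_dls.train))  # augmented batch (training split only)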
+

source

+
+

TSIdentity

+
+
 TSIdentity (magnitude=None, **kwargs)
+
+

Applies the identity tfm to a TSTensor batch

+
+
test_eq(TSIdentity()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSShuffle_HLs

+
+
 TSShuffle_HLs (magnitude=1.0, ex=None, **kwargs)
+
+

Randomly shuffles HIs/LOs of an OHLC TSTensor batch

+
+
test_eq(TSShuffle_HLs()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSShuffleSteps

+
+
 TSShuffleSteps (magnitude=1.0, ex=None, **kwargs)
+
+

Randomly shuffles consecutive sequence datapoints in batch

+
+
t = TSTensor(torch.arange(11).float())
+tt_ = []
+for _ in range(1000):
+    tt = TSShuffleSteps()(t, split_idx=0)
+    test_eq(len(set(tt.tolist())), len(t))
+    test_ne(tt, t)
+    tt_.extend([t for i,t in enumerate(tt) if t!=i])
+x, y = np.unique(tt_, return_counts=True) # visualize the distribution: counts should be roughly equal across positions, and about half for the first and last items
+plt.bar(x, y);
+
+
+
+

+
+
+
+
+
+

source

+
+
+

TSGaussianNoise

+
+
 TSGaussianNoise (magnitude=0.5, additive=True, ex=None, **kwargs)
+
+

Applies additive or multiplicative Gaussian noise

+
+
test_eq(TSGaussianNoise(.1, additive=True)(xb, split_idx=0).shape, xb.shape)
+test_eq(TSGaussianNoise(.1, additive=False)(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSMagMulNoise

+
+
 TSMagMulNoise (magnitude=1, ex=None, **kwargs)
+
+

Applies multiplicative noise on the y-axis for each step of a TSTensor batch

+
+

source

+
+
+

TSMagAddNoise

+
+
 TSMagAddNoise (magnitude=1, ex=None, **kwargs)
+
+

Applies additive noise on the y-axis for each step of a TSTensor batch

+
+
test_eq(TSMagAddNoise()(xb, split_idx=0).shape, xb.shape)
+test_eq(TSMagMulNoise()(xb, split_idx=0).shape, xb.shape)
+test_ne(TSMagAddNoise()(xb, split_idx=0), xb)
+test_ne(TSMagMulNoise()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

random_cum_linear_generator

+
+
 random_cum_linear_generator (o, magnitude=0.1)
+
+
+

source

+
+
+

random_cum_noise_generator

+
+
 random_cum_noise_generator (o, magnitude=0.1, noise=None)
+
+
+

source

+
+
+

random_cum_curve_generator

+
+
 random_cum_curve_generator (o, magnitude=0.1, order=4, noise=None)
+
+
+

source

+
+
+

random_curve_generator

+
+
 random_curve_generator (o, magnitude=0.1, order=4, noise=None)
+
+
+

source

+
+
+

TSTimeNoise

+
+
 TSTimeNoise (magnitude=0.1, ex=None, **kwargs)
+
+

Applies noise to each step in the x-axis of a TSTensor batch based on a smooth random curve

+
+
test_eq(TSTimeNoise()(xb, split_idx=0).shape, xb.shape)
+test_ne(TSTimeNoise()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSMagWarp

+
+
 TSMagWarp (magnitude=0.02, ord=4, ex=None, **kwargs)
+
+

Applies warping to the y-axis of a TSTensor batch based on a smooth random curve

+
+
test_eq(TSMagWarp()(xb, split_idx=0).shape, xb.shape)
+test_ne(TSMagWarp()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSTimeWarp

+
+
 TSTimeWarp (magnitude=0.1, ord=6, ex=None, **kwargs)
+
+

Applies time warping to the x-axis of a TSTensor batch based on a smooth random curve

+
+
test_eq(TSTimeWarp()(xb, split_idx=0).shape, xb.shape)
+test_ne(TSTimeWarp()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSWindowWarp

+
+
 TSWindowWarp (magnitude=0.1, ex=None, **kwargs)
+
+

Applies window slicing to the x-axis of a TSTensor batch using a random linear curve (based on https://halshs.archives-ouvertes.fr/halshs-01357973/document)

+
+
test_eq(TSWindowWarp()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSMagScalePerVar

+
+
 TSMagScalePerVar (magnitude=0.5, ex=None, **kwargs)
+
+

Applies per_var scaling to the y-axis of a TSTensor batch based on a scalar

+
+

source

+
+
+

TSMagScale

+
+
 TSMagScale (magnitude=0.5, ex=None, **kwargs)
+
+

Applies scaling to the y-axis of a TSTensor batch based on a scalar

+
+
test_eq(TSMagScale()(xb, split_idx=0).shape, xb.shape)
+test_eq(TSMagScalePerVar()(xb, split_idx=0).shape, xb.shape)
+test_ne(TSMagScale()(xb, split_idx=0), xb)
+test_ne(TSMagScalePerVar()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

test_interpolate

+
+
 test_interpolate (mode='linear')
+
+
+
# Run the test
+test_interpolate('linear')
+
+
linear interpolation is not supported by mps. You can try a different mode
+Error: The operator 'aten::upsample_linear1d.out' is not currently implemented for the MPS device. If you want this op to be added in priority during the prototype phase of this feature, please comment on https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be slower than running natively on MPS.
+
+
+
False
+
+
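The failure above comes from running on Apple's MPS backend, and the error message itself suggests a workaround. A hedged sketch of that workaround (the environment variable is read when torch initializes, so it should be set before torch/tsai are imported):

# Workaround suggested by the error message above; set before importing torch
import os
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"  # fall back to CPU for unsupported MPS ops (slower)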
+
+
test_interpolate('nearest')
+
+
True
+
+
+
+

source

+
+
+

TSRandomResizedCrop

+
+
 TSRandomResizedCrop (magnitude=0.1, size=None, scale=None, ex=None,
+                      mode='nearest', **kwargs)
+
+

Randomly amplifies a sequence focusing on a random section of the steps

+
+
if test_interpolate('nearest'):
+    test_eq(TSRandomResizedCrop(.5)(xb, split_idx=0).shape, xb.shape)
+    test_ne(TSRandomResizedCrop(size=.8, scale=(.5, 1))(xb, split_idx=0).shape, xb.shape)
+    test_ne(TSRandomResizedCrop(size=20, scale=(.5, 1))(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSWindowSlicing

+
+
 TSWindowSlicing (magnitude=0.1, ex=None, mode='nearest', **kwargs)
+
+

Randomly extracts and resizes a ts slice (based on https://halshs.archives-ouvertes.fr/halshs-01357973/document)

+
+
if test_interpolate('nearest'):
+    test_eq(TSWindowSlicing()(xb, split_idx=0).shape, xb.shape)
+    test_ne(TSWindowSlicing()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSRandomZoomOut

+
+
 TSRandomZoomOut (magnitude=0.1, ex=None, mode='nearest', **kwargs)
+
+

Randomly compresses a sequence on the x-axis

+
+
if test_interpolate('nearest'):
+    test_eq(TSRandomZoomOut(.5)(xb, split_idx=0).shape, xb.shape)#
+
+
+

source

+
+
+

TSRandomTimeScale

+
+
 TSRandomTimeScale (magnitude=0.1, ex=None, mode='nearest', **kwargs)
+
+

Randomly amplifies/compresses a sequence on the x-axis, keeping the same length

+
+
if test_interpolate('nearest'):
+    test_eq(TSRandomTimeScale(.5)(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSRandomTimeStep

+
+
 TSRandomTimeStep (magnitude=0.02, ex=None, mode='nearest', **kwargs)
+
+

Compresses a sequence on the x-axis by randomly selecting sequence steps and interpolating to previous size

+
+
if test_interpolate('nearest'):
+    test_eq(TSRandomTimeStep()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSResampleSteps

+
+
 TSResampleSteps (step_pct=1.0, same_seq_len=True, magnitude=None,
+                  **kwargs)
+
+

Transform that randomly selects and sorts sequence steps (with replacement) maintaining the sequence length

+
+
test_eq(TSResampleSteps(step_pct=.9, same_seq_len=False)(xb, split_idx=0).shape[-1], round(.9*xb.shape[-1]))
+test_eq(TSResampleSteps(step_pct=.9, same_seq_len=True)(xb, split_idx=0).shape[-1], xb.shape[-1])
+
+
+

source

+
+
+

TSBlur

+
+
 TSBlur (magnitude=1.0, ex=None, filt_len=None, **kwargs)
+
+

Blurs a sequence applying a filter of type [1, 0, 1]

+
+
test_eq(TSBlur(filt_len=7)(xb, split_idx=0).shape, xb.shape)
+test_ne(TSBlur()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSSmooth

+
+
 TSSmooth (magnitude=1.0, ex=None, filt_len=None, **kwargs)
+
+

Smooths a sequence by applying a filter of type [1, 5, 1]

+
+
test_eq(TSSmooth(filt_len=7)(xb, split_idx=0).shape, xb.shape)
+test_ne(TSSmooth()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSFreqDenoise

+
+
 TSFreqDenoise (magnitude=0.1, ex=None, wavelet='db4', level=2, thr=None,
+                thr_mode='hard', pad_mode='per', **kwargs)
+
+

Denoises a sequence applying a wavelet decomposition method

+
+

source

+
+
+

maddest

+
+
 maddest (d, axis=None)
+
+
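maddest has no docstring here; it sits next to TSFreqDenoise and presumably provides a robust deviation estimate used when computing the wavelet denoising threshold. A hedged NumPy illustration of a mean-absolute-deviation statistic of that kind (an assumption for illustration, not necessarily the library's exact implementation):

import numpy as np

def maddest_sketch(d, axis=None):
    # mean absolute deviation around the mean (assumed behavior, illustration only)
    return np.mean(np.absolute(d - np.mean(d, axis)), axis)

maddest_sketch(np.array([1., 2., 3., 10.]))  # -> 3.0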
+
try: import pywt
+except ImportError: pass
+
+
+
if 'pywt' in dir():
+    test_eq(TSFreqDenoise()(xb, split_idx=0).shape, xb.shape)
+    test_ne(TSFreqDenoise()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSRandomFreqNoise

+
+
 TSRandomFreqNoise (magnitude=0.1, ex=None, wavelet='db4', level=2,
+                    mode='constant', **kwargs)
+
+

Applies random noise using a wavelet decomposition method

+
+
if 'pywt' in dir():
+    test_eq(TSRandomFreqNoise()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSRandomResizedLookBack

+
+
 TSRandomResizedLookBack (magnitude=0.1, mode='nearest', **kwargs)
+
+

Selects a random number of sequence steps starting from the end and returns an output of the same shape

+
+
if test_interpolate('nearest'):
+    for i in range(100):
+        o = TSRandomResizedLookBack()(xb, split_idx=0)
+        test_eq(o.shape[-1], xb.shape[-1])
+
+
+

source

+
+
+

TSRandomLookBackOut

+
+
 TSRandomLookBackOut (magnitude=0.1, **kwargs)
+
+

Selects a random number of sequence steps starting from the end and sets them to zero

+
+
for i in range(100):
+    o = TSRandomLookBackOut()(xb, split_idx=0)
+    test_eq(o.shape[-1], xb.shape[-1])
+
+
+

source

+
+
+

TSVarOut

+
+
 TSVarOut (magnitude=0.05, ex=None, **kwargs)
+
+

Sets the value of a random number of variables to zero

+
+
test_eq(TSVarOut()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSCutOut

+
+
 TSCutOut (magnitude=0.05, ex=None, **kwargs)
+
+

Sets a random section of the sequence to zero

+
+
test_eq(TSCutOut()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSTimeStepOut

+
+
 TSTimeStepOut (magnitude=0.05, ex=None, **kwargs)
+
+

Sets random sequence steps to zero

+
+
test_eq(TSTimeStepOut()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSRandomCropPad

+
+
 TSRandomCropPad (magnitude=0.05, ex=None, **kwargs)
+
+

Crops a section of the sequence of a random length

+
+
test_eq(TSRandomCropPad()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSMaskOut

+
+
 TSMaskOut (magnitude=0.1, compensate:bool=False, ex=None, **kwargs)
+
+

Applies a random mask

+
+
test_eq(TSMaskOut()(xb, split_idx=0).shape, xb.shape)
+test_ne(TSMaskOut()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSInputDropout

+
+
 TSInputDropout (magnitude=0.0, ex=None, **kwargs)
+
+

Applies input dropout with requires_grad=False

+
+
test_eq(TSInputDropout(.1)(xb, split_idx=0).shape, xb.shape)
+test_ne(TSInputDropout(.1)(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSTranslateX

+
+
 TSTranslateX (magnitude=0.1, ex=None, **kwargs)
+
+

Moves a selected sequence window a random number of steps

+
+
test_eq(TSTranslateX()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSRandomShift

+
+
 TSRandomShift (magnitude=0.02, ex=None, **kwargs)
+
+

Shifts and splits a sequence

+
+
test_eq(TSRandomShift()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSHorizontalFlip

+
+
 TSHorizontalFlip (magnitude=1.0, ex=None, **kwargs)
+
+

Flips the sequence along the x-axis

+
+
test_eq(TSHorizontalFlip()(xb, split_idx=0).shape, xb.shape)
+test_ne(TSHorizontalFlip()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSRandomTrend

+
+
 TSRandomTrend (magnitude=0.1, ex=None, **kwargs)
+
+

Randomly rotates the sequence along the z-axis

+
+
test_eq(TSRandomTrend()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSVerticalFlip

+
+
 TSVerticalFlip (magnitude=1.0, ex=None, **kwargs)
+
+

Applies a negative value to the time sequence

+
+
test_eq(TSVerticalFlip()(xb, split_idx=0).shape, xb.shape)
+test_ne(TSVerticalFlip()(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSResize

+
+
 TSResize (magnitude=-0.5, size=None, ex=None, mode='nearest', **kwargs)
+
+

Resizes the sequence length of a time series

+
+
if test_interpolate('nearest'):
+    for sz in np.linspace(.2, 2, 10): test_eq(TSResize(sz)(xb, split_idx=0).shape[-1], int(round(xb.shape[-1]*(1+sz))))
+    test_ne(TSResize(1)(xb, split_idx=0), xb)
+
+
+

source

+
+
+

TSRandomSize

+
+
 TSRandomSize (magnitude=0.1, ex=None, mode='nearest', **kwargs)
+
+

Randomly resizes the sequence length of a time series

+
+
if test_interpolate('nearest'):
+    seq_len_ = []
+    for i in range(100):
+        o = TSRandomSize(.5)(xb, split_idx=0)
+        seq_len_.append(o.shape[-1])
+    test_lt(min(seq_len_), xb.shape[-1])
+    test_gt(max(seq_len_), xb.shape[-1])
+
+
+

source

+
+
+

TSRandomLowRes

+
+
 TSRandomLowRes (magnitude=0.5, ex=None, mode='nearest', **kwargs)
+
+

Randomly resizes the sequence length of a time series to a lower resolution

+
+

source

+
+
+

TSDownUpScale

+
+
 TSDownUpScale (magnitude=0.5, ex=None, mode='nearest', **kwargs)
+
+

Downscales a time series and upscales it again to previous sequence length

+
+
if test_interpolate('nearest'):
+    test_eq(TSDownUpScale()(xb, split_idx=0).shape, xb.shape)
+
+
+

source

+
+
+

TSRandomDownUpScale

+
+
 TSRandomDownUpScale (magnitude=0.5, ex=None, mode='nearest', **kwargs)
+
+

Randomly downscales a time series and upscales it again to previous sequence length

+
+
if test_interpolate('nearest'):
+    test_eq(TSRandomDownUpScale()(xb, split_idx=0).shape, xb.shape)
+    test_ne(TSDownUpScale()(xb, split_idx=0), xb)
+    test_eq(TSDownUpScale()(xb, split_idx=1), xb)
+
+
+

source

+
+
+

TSRandomConv

+
+
 TSRandomConv (magnitude=0.05, ex=None, ks=[1, 3, 5, 7], **kwargs)
+
+

Applies a convolution with a random kernel and random weights with requires_grad=False

+
+
for i in range(5):
+    o = TSRandomConv(magnitude=0.05, ex=None, ks=[1, 3, 5, 7])(xb, split_idx=0)
+    test_eq(o.shape, xb.shape)
+
+
+

source

+
+
+

TSRandom2Value

+
+
 TSRandom2Value (magnitude=0.1, sel_vars=None, sel_steps=None,
+                 static=False, value=nan, **kwargs)
+
+

Randomly sets selected variables of type TSTensor to predefined value (default: np.nan)

+
+
t = TSTensor(torch.ones(2, 3, 10))
+TSRandom2Value(magnitude=0.5, sel_vars=None, sel_steps=None, static=False, value=0)(t, split_idx=0).data
+
+
tensor([[[0., 0., 1., 0., 1., 1., 0., 1., 1., 0.],
+         [1., 1., 0., 1., 1., 1., 1., 1., 1., 0.],
+         [1., 1., 1., 1., 1., 0., 0., 1., 1., 1.]],
+
+        [[1., 1., 1., 1., 1., 0., 1., 1., 0., 1.],
+         [0., 0., 0., 0., 0., 1., 0., 1., 0., 1.],
+         [0., 1., 0., 1., 0., 0., 0., 1., 0., 0.]]])
+
+
+
+
t = TSTensor(torch.ones(2, 3, 10))
+TSRandom2Value(magnitude=0.5, sel_vars=[1], sel_steps=slice(-5, None), static=False, value=0)(t, split_idx=0).data
+
+
tensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [1., 1., 1., 1., 1., 0., 1., 0., 0., 0.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],
+
+        [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [1., 1., 1., 1., 1., 0., 1., 0., 0., 0.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]])
+
+
+
+
t = TSTensor(torch.ones(2, 3, 10))
+TSRandom2Value(magnitude=0.5, sel_vars=[1], sel_steps=None, static=True, value=0)(t, split_idx=0).data
+
+
tensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],
+
+        [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]])
+
+
+
+
t = TSTensor(torch.ones(2, 3, 10))
+TSRandom2Value(magnitude=1, sel_vars=1, sel_steps=None, static=False, value=0)(t, split_idx=0).data
+
+
tensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],
+
+        [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]])
+
+
+
+
t = TSTensor(torch.ones(2, 3, 10))
+TSRandom2Value(magnitude=1, sel_vars=[1,2], sel_steps=None, static=False, value=0)(t, split_idx=0).data
+
+
tensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]],
+
+        [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]])
+
+
+
+
t = TSTensor(torch.ones(2, 3, 10))
+TSRandom2Value(magnitude=1, sel_vars=1, sel_steps=[1,3,5], static=False, value=0)(t, split_idx=0).data
+
+
tensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],
+
+        [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.],
+         [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]])
+
+
+
+
t = TSTensor(torch.ones(2, 3, 10))
+TSRandom2Value(magnitude=1, sel_vars=[1,2], sel_steps=[1,3,5], static=False, value=0)(t, split_idx=0).data
+
+
tensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.],
+         [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.]],
+
+        [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
+         [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.],
+         [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.]]])
+
+
+
+
t = TSTensor(torch.ones(2,3,4))
+TSRandom2Value(magnitude=.5, sel_vars=[0,2])(t, split_idx=0).data
+
+
tensor([[[1., nan, nan, 1.],
+         [1., 1., 1., 1.],
+         [1., nan, 1., 1.]],
+
+        [[nan, 1., 1., nan],
+         [1., 1., 1., 1.],
+         [nan, nan, 1., 1.]]])
+
+
+
+
t = TSTensor(torch.ones(2,3,4))
+TSRandom2Value(magnitude=.5, sel_steps=slice(2, None))(t, split_idx=0).data
+
+
tensor([[[1., 1., 1., nan],
+         [1., 1., nan, 1.],
+         [1., 1., nan, nan]],
+
+        [[1., 1., nan, 1.],
+         [1., 1., nan, nan],
+         [1., 1., nan, 1.]]])
+
+
+
+
t = TSTensor(torch.ones(2,3,100))
+test_gt(np.isnan(TSRandom2Value(magnitude=.5)(t, split_idx=0)).sum().item(), 0)
+t = TSTensor(torch.ones(2,3,100))
+test_gt(np.isnan(TSRandom2Value(magnitude=.5, sel_vars=[0,2])(t, split_idx=0)[:, [0,2]]).sum().item(), 0)
+t = TSTensor(torch.ones(2,3,100))
+test_eq(np.isnan(TSRandom2Value(magnitude=.5, sel_vars=[0,2])(t, split_idx=0)[:, 1]).sum().item(), 0)
+
+
+

source

+
+
+

TSMask2Value

+
+
 TSMask2Value (mask_fn, value=nan, sel_vars=None, **kwargs)
+
+

Randomly sets selected variables of type TSTensor to predefined value (default: np.nan)

+
+
t = TSTensor(torch.ones(2,3,100))
+def _mask_fn(o, r=.15, value=np.nan):
+    return torch.rand_like(o) > (1-r)
+test_gt(np.isnan(TSMask2Value(_mask_fn)(t, split_idx=0)).sum().item(), 0)
+
+
+

source

+
+
+

TSSelfDropout

+
+
 TSSelfDropout (p:float=1.0, nm:str=None, before_call:callable=None,
+                **kwargs)
+
+

Applies dropout to a tensor with nan values by rotating axis=0 inplace

p (float, default=1.0): Probability of applying Transform
nm (str, default=None)
before_call (callable, default=None): Optional batchwise preprocessing function
kwargs
+
+

source

+
+
+

self_mask

+
+
 self_mask (o)
+
+
+
t = TSTensor(torch.ones(2,3,100))
+mask = torch.rand_like(t) > .7
+t[mask] = np.nan
+nan_perc = np.isnan(t).float().mean().item()
+t2 = TSSelfDropout()(t, split_idx=0)
+test_gt(torch.isnan(t2).float().mean().item(), nan_perc)
+nan_perc, torch.isnan(t2).float().mean().item()
+
+
(0.30000001192092896, 0.49000000953674316)
+
+
+
+

source

+
+
+

RandAugment

+
+
 RandAugment (tfms:list, N:int=1, M:int=3, **kwargs)
+
+

A transform that refreshes its state via before_call at each __call__

+
+
test_ne(RandAugment(TSMagAddNoise, N=5, M=10)(xb, split_idx=0), xb)
+
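A hedged sketch of how RandAugment could be plugged into the batch transforms of the dataloaders, randomly applying N of the listed tfms with magnitude M to each training batch (the transform list and values below are illustrative assumptions):

# Hedged sketch: randomly apply N of the listed tfms (magnitude M) per training batch
rand_aug = RandAugment([TSMagAddNoise, TSTimeWarp, TSMagWarp], N=2, M=5)
aug_dls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=[TSStandardize(), rand_aug], bs=128)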
+
+

source

+
+
+

TestTfm

+
+
 TestTfm (tfm, magnitude=1.0, ex=None, **kwargs)
+
+

Utility class to test the output of selected tfms during training

+
+

source

+
+
+

get_tfm_name

+
+
 get_tfm_name (tfm)
+
+
+
test_eq(get_tfm_name(partial(TSMagScale()))==get_tfm_name((partial(TSMagScale()), 0.1, .05))==get_tfm_name(TSMagScale())==get_tfm_name((TSMagScale(), 0.1, .05)), True)
+
+
+
all_TS_randaugs_names = [get_tfm_name(t) for t in all_TS_randaugs]
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/data.unwindowed.html b/data.unwindowed.html new file mode 100644 index 000000000..622dd487f --- /dev/null +++ b/data.unwindowed.html @@ -0,0 +1,1442 @@ + + + + + + + + + +tsai - Unwindowed datasets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Unwindowed datasets

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Functionality that will allow you to create a dataset that applies sliding windows to the input data on the fly. This heavily reduces the size of the input data files, as only the original unwindowed data needs to be stored.

+
+

I’d like to thank both Thomas Capelle (https://github.com/tcapelle) and Xander Dunn (https://github.com/xanderdunn) for their contributions to make this code possible.

+
+

source

+
+

TSUnwindowedDatasets

+
+
 TSUnwindowedDatasets (dataset, splits)
+
+

Base class for lists with subsets

+
+

source

+
+
+

TSUnwindowedDataset

+
+
 TSUnwindowedDataset (X=None, y=None, y_func=None, window_size=1,
+                      stride=1, drop_start=0, drop_end=0, seq_first=True,
+                      **kwargs)
+
+

Initialize self. See help(type(self)) for accurate signature.

+
+
def y_func(y): return y.astype('float').mean(1)
+
+

This approach works with both univariate and multivariate data.

+
    +
  • Univariate: we’ll use a simple array with 20 values, stored in two layouts: one with the seq_len first (X0), the other with the seq_len second (X1).
  • Multivariate: we’ll use 2 time series arrays, one with the seq_len first (X2), the other with the seq_len second (X3). No sliding window has been applied to them yet.
+
+
# Univariate
+X0 = np.arange(20).astype(float)
+X1 = np.arange(20).reshape(1, -1).astype(float)
+X0.shape, X0, X1.shape, X1
+
+
((20,),
+ array([ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., 10., 11., 12.,
+        13., 14., 15., 16., 17., 18., 19.]),
+ (1, 20),
+ array([[ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., 10., 11., 12.,
+         13., 14., 15., 16., 17., 18., 19.]]))
+
+
+
+
# Multivariate
+X2 = np.arange(20).reshape(-1,1)*np.array([1, 10, 100]).reshape(1,-1).astype(float)
+X3 = np.arange(20).reshape(1,-1)*np.array([1, 10, 100]).reshape(-1,1).astype(float)
+X2.shape, X3.shape, X2, X3
+
+
((20, 3),
+ (3, 20),
+ array([[0.0e+00, 0.0e+00, 0.0e+00],
+        [1.0e+00, 1.0e+01, 1.0e+02],
+        [2.0e+00, 2.0e+01, 2.0e+02],
+        [3.0e+00, 3.0e+01, 3.0e+02],
+        [4.0e+00, 4.0e+01, 4.0e+02],
+        [5.0e+00, 5.0e+01, 5.0e+02],
+        [6.0e+00, 6.0e+01, 6.0e+02],
+        [7.0e+00, 7.0e+01, 7.0e+02],
+        [8.0e+00, 8.0e+01, 8.0e+02],
+        [9.0e+00, 9.0e+01, 9.0e+02],
+        [1.0e+01, 1.0e+02, 1.0e+03],
+        [1.1e+01, 1.1e+02, 1.1e+03],
+        [1.2e+01, 1.2e+02, 1.2e+03],
+        [1.3e+01, 1.3e+02, 1.3e+03],
+        [1.4e+01, 1.4e+02, 1.4e+03],
+        [1.5e+01, 1.5e+02, 1.5e+03],
+        [1.6e+01, 1.6e+02, 1.6e+03],
+        [1.7e+01, 1.7e+02, 1.7e+03],
+        [1.8e+01, 1.8e+02, 1.8e+03],
+        [1.9e+01, 1.9e+02, 1.9e+03]]),
+ array([[0.0e+00, 1.0e+00, 2.0e+00, 3.0e+00, 4.0e+00, 5.0e+00, 6.0e+00,
+         7.0e+00, 8.0e+00, 9.0e+00, 1.0e+01, 1.1e+01, 1.2e+01, 1.3e+01,
+         1.4e+01, 1.5e+01, 1.6e+01, 1.7e+01, 1.8e+01, 1.9e+01],
+        [0.0e+00, 1.0e+01, 2.0e+01, 3.0e+01, 4.0e+01, 5.0e+01, 6.0e+01,
+         7.0e+01, 8.0e+01, 9.0e+01, 1.0e+02, 1.1e+02, 1.2e+02, 1.3e+02,
+         1.4e+02, 1.5e+02, 1.6e+02, 1.7e+02, 1.8e+02, 1.9e+02],
+        [0.0e+00, 1.0e+02, 2.0e+02, 3.0e+02, 4.0e+02, 5.0e+02, 6.0e+02,
+         7.0e+02, 8.0e+02, 9.0e+02, 1.0e+03, 1.1e+03, 1.2e+03, 1.3e+03,
+         1.4e+03, 1.5e+03, 1.6e+03, 1.7e+03, 1.8e+03, 1.9e+03]]))
+
+
+

Now, instead of applying SlidingWindow to create and save the time series that can be consumed by a time series model, we can use a dataset that creates the data on the fly. In this way we avoid the need to create and save large files. This approach is also useful when you want to test different sliding window sizes, as otherwise you would need to create files for every size you want to test. The dataset will create the samples correctly formatted and ready to be passed on to a time series architecture.

+
+
wds0 = TSUnwindowedDataset(X0, window_size=5, stride=2, seq_first=True)[:][0]
+wds1 = TSUnwindowedDataset(X1, window_size=5, stride=2, seq_first=False)[:][0]
+test_eq(wds0, wds1)
+wds0, wds0.data, wds1, wds1.data
+
+
(TSTensor(samples:8, vars:1, len:5, device=cpu),
+ tensor([[[ 0.,  1.,  2.,  3.,  4.]],
+ 
+         [[ 2.,  3.,  4.,  5.,  6.]],
+ 
+         [[ 4.,  5.,  6.,  7.,  8.]],
+ 
+         [[ 6.,  7.,  8.,  9., 10.]],
+ 
+         [[ 8.,  9., 10., 11., 12.]],
+ 
+         [[10., 11., 12., 13., 14.]],
+ 
+         [[12., 13., 14., 15., 16.]],
+ 
+         [[14., 15., 16., 17., 18.]]]),
+ TSTensor(samples:8, vars:1, len:5, device=cpu),
+ tensor([[[ 0.,  1.,  2.,  3.,  4.]],
+ 
+         [[ 2.,  3.,  4.,  5.,  6.]],
+ 
+         [[ 4.,  5.,  6.,  7.,  8.]],
+ 
+         [[ 6.,  7.,  8.,  9., 10.]],
+ 
+         [[ 8.,  9., 10., 11., 12.]],
+ 
+         [[10., 11., 12., 13., 14.]],
+ 
+         [[12., 13., 14., 15., 16.]],
+ 
+         [[14., 15., 16., 17., 18.]]]))
+
+
+
+
wds2 = TSUnwindowedDataset(X2, window_size=5, stride=2, seq_first=True)[:][0]
+wds3 = TSUnwindowedDataset(X3, window_size=5, stride=2, seq_first=False)[:][0]
+test_eq(wds2, wds3)
+wds2, wds3, wds2.data, wds3.data
+
+
(TSTensor(samples:8, vars:3, len:5, device=cpu),
+ TSTensor(samples:8, vars:3, len:5, device=cpu),
+ tensor([[[0.0000e+00, 1.0000e+00, 2.0000e+00, 3.0000e+00, 4.0000e+00],
+          [0.0000e+00, 1.0000e+01, 2.0000e+01, 3.0000e+01, 4.0000e+01],
+          [0.0000e+00, 1.0000e+02, 2.0000e+02, 3.0000e+02, 4.0000e+02]],
+ 
+         [[2.0000e+00, 3.0000e+00, 4.0000e+00, 5.0000e+00, 6.0000e+00],
+          [2.0000e+01, 3.0000e+01, 4.0000e+01, 5.0000e+01, 6.0000e+01],
+          [2.0000e+02, 3.0000e+02, 4.0000e+02, 5.0000e+02, 6.0000e+02]],
+ 
+         [[4.0000e+00, 5.0000e+00, 6.0000e+00, 7.0000e+00, 8.0000e+00],
+          [4.0000e+01, 5.0000e+01, 6.0000e+01, 7.0000e+01, 8.0000e+01],
+          [4.0000e+02, 5.0000e+02, 6.0000e+02, 7.0000e+02, 8.0000e+02]],
+ 
+         [[6.0000e+00, 7.0000e+00, 8.0000e+00, 9.0000e+00, 1.0000e+01],
+          [6.0000e+01, 7.0000e+01, 8.0000e+01, 9.0000e+01, 1.0000e+02],
+          [6.0000e+02, 7.0000e+02, 8.0000e+02, 9.0000e+02, 1.0000e+03]],
+ 
+         [[8.0000e+00, 9.0000e+00, 1.0000e+01, 1.1000e+01, 1.2000e+01],
+          [8.0000e+01, 9.0000e+01, 1.0000e+02, 1.1000e+02, 1.2000e+02],
+          [8.0000e+02, 9.0000e+02, 1.0000e+03, 1.1000e+03, 1.2000e+03]],
+ 
+         [[1.0000e+01, 1.1000e+01, 1.2000e+01, 1.3000e+01, 1.4000e+01],
+          [1.0000e+02, 1.1000e+02, 1.2000e+02, 1.3000e+02, 1.4000e+02],
+          [1.0000e+03, 1.1000e+03, 1.2000e+03, 1.3000e+03, 1.4000e+03]],
+ 
+         [[1.2000e+01, 1.3000e+01, 1.4000e+01, 1.5000e+01, 1.6000e+01],
+          [1.2000e+02, 1.3000e+02, 1.4000e+02, 1.5000e+02, 1.6000e+02],
+          [1.2000e+03, 1.3000e+03, 1.4000e+03, 1.5000e+03, 1.6000e+03]],
+ 
+         [[1.4000e+01, 1.5000e+01, 1.6000e+01, 1.7000e+01, 1.8000e+01],
+          [1.4000e+02, 1.5000e+02, 1.6000e+02, 1.7000e+02, 1.8000e+02],
+          [1.4000e+03, 1.5000e+03, 1.6000e+03, 1.7000e+03, 1.8000e+03]]]),
+ tensor([[[0.0000e+00, 1.0000e+00, 2.0000e+00, 3.0000e+00, 4.0000e+00],
+          [0.0000e+00, 1.0000e+01, 2.0000e+01, 3.0000e+01, 4.0000e+01],
+          [0.0000e+00, 1.0000e+02, 2.0000e+02, 3.0000e+02, 4.0000e+02]],
+ 
+         [[2.0000e+00, 3.0000e+00, 4.0000e+00, 5.0000e+00, 6.0000e+00],
+          [2.0000e+01, 3.0000e+01, 4.0000e+01, 5.0000e+01, 6.0000e+01],
+          [2.0000e+02, 3.0000e+02, 4.0000e+02, 5.0000e+02, 6.0000e+02]],
+ 
+         [[4.0000e+00, 5.0000e+00, 6.0000e+00, 7.0000e+00, 8.0000e+00],
+          [4.0000e+01, 5.0000e+01, 6.0000e+01, 7.0000e+01, 8.0000e+01],
+          [4.0000e+02, 5.0000e+02, 6.0000e+02, 7.0000e+02, 8.0000e+02]],
+ 
+         [[6.0000e+00, 7.0000e+00, 8.0000e+00, 9.0000e+00, 1.0000e+01],
+          [6.0000e+01, 7.0000e+01, 8.0000e+01, 9.0000e+01, 1.0000e+02],
+          [6.0000e+02, 7.0000e+02, 8.0000e+02, 9.0000e+02, 1.0000e+03]],
+ 
+         [[8.0000e+00, 9.0000e+00, 1.0000e+01, 1.1000e+01, 1.2000e+01],
+          [8.0000e+01, 9.0000e+01, 1.0000e+02, 1.1000e+02, 1.2000e+02],
+          [8.0000e+02, 9.0000e+02, 1.0000e+03, 1.1000e+03, 1.2000e+03]],
+ 
+         [[1.0000e+01, 1.1000e+01, 1.2000e+01, 1.3000e+01, 1.4000e+01],
+          [1.0000e+02, 1.1000e+02, 1.2000e+02, 1.3000e+02, 1.4000e+02],
+          [1.0000e+03, 1.1000e+03, 1.2000e+03, 1.3000e+03, 1.4000e+03]],
+ 
+         [[1.2000e+01, 1.3000e+01, 1.4000e+01, 1.5000e+01, 1.6000e+01],
+          [1.2000e+02, 1.3000e+02, 1.4000e+02, 1.5000e+02, 1.6000e+02],
+          [1.2000e+03, 1.3000e+03, 1.4000e+03, 1.5000e+03, 1.6000e+03]],
+ 
+         [[1.4000e+01, 1.5000e+01, 1.6000e+01, 1.7000e+01, 1.8000e+01],
+          [1.4000e+02, 1.5000e+02, 1.6000e+02, 1.7000e+02, 1.8000e+02],
+          [1.4000e+03, 1.5000e+03, 1.6000e+03, 1.7000e+03, 1.8000e+03]]]))
+
+
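TSUnwindowedDatasets (documented above) wraps a single TSUnwindowedDataset with splits so the windows can be divided into train/valid subsets. A minimal hedged sketch; the split indices are made up for illustration, and subset access by indexing the wrapper is an assumption based on its "lists with subsets" docstring rather than a confirmed API:

# Hedged sketch: wrap an unwindowed dataset with hypothetical train/valid window indices
wd = TSUnwindowedDataset(X2, window_size=5, stride=2, seq_first=True)  # 8 windows in total
splits = ([0, 1, 2, 3, 4, 5], [6, 7])                                  # hypothetical split indices
wds = TSUnwindowedDatasets(wd, splits)
train_ds, valid_ds = wds[0], wds[1]                                    # assumed subset access, one per split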
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/data.validation.html b/data.validation.html new file mode 100644 index 000000000..c89be805f --- /dev/null +++ b/data.validation.html @@ -0,0 +1,2954 @@ + + + + + + + + + +tsai - Spliting data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Splitting data

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Functions required to perform cross-validation and to transform a unique time series sequence into multiple samples ready to be used by a time series model.

+
+
+

source

+
+

RandomSplitter

+
+
 RandomSplitter (valid_pct=0.2, seed=None)
+
+

Create function that splits items between train/val with valid_pct randomly.

+
+

source

+
+
+

balance_idx

+
+
 balance_idx (o, shuffle=False, strategy='oversample', random_state=None,
+              verbose=False)
+
+
+

source

+
+
+

leakage_finder

+
+
 leakage_finder (*splits, verbose=True)
+
+

You can pass splits as a tuple, or train, valid, …

+
+

source

+
+
+

check_splits_overlap

+
+
 check_splits_overlap (splits)
+
+
+

source

+
+
+

check_overlap

+
+
 check_overlap (a, b, c=None)
+
+

Checks if there’s overlap between array-like objects

+
+
a = np.arange(10)
+b = np.arange(10, 20)
+test_eq(check_overlap(a, b), False)
+a = np.arange(10)
+b = np.arange(9, 20)
+test_eq(check_overlap(a, b), [9])
+a = np.arange(10)
+b = np.arange(10, 20)
+c = np.arange(20, 30)
+test_eq(check_overlap(a, b, c), False)
+a = np.arange(10)
+b = np.arange(10, 20)
+c = np.arange(10, 30)
+test_eq(check_overlap(a, b, c), ([], [], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
+
+
+
y = np.concatenate([[i] * np.random.randint(10, 100) for i in range(5)])
+train_split = np.random.choice(len(y), int(len(y) * .8), False)
+c, v = np.unique(y[train_split], return_counts=True)
+print(f"{'imbalanced:':25} {c} {v}")
+
+oversampled_train_split = train_split[balance_idx(y[train_split], strategy="oversample")]
+osc, osv = np.unique(y[oversampled_train_split], return_counts=True)
+print(f"{'balanced (oversample):':25} {osc} {osv}")
+test_eq(osv, [max(v)] * len(v))
+
+undersampled_train_split = train_split[balance_idx(y[train_split], strategy="undersample")]
+usc, usv = np.unique(y[undersampled_train_split], return_counts=True)
+print(f"{'balanced (undersample):':25} {usc} {usv}")
+test_eq(usv, [min(v)] * len(v))
+
+
imbalanced:               [0 1 2 3 4] [24 43 64 41  8]
+balanced (oversample):    [0 1 2 3 4] [64 64 64 64 64]
+balanced (undersample):   [0 1 2 3 4] [8 8 8 8 8]
+
+
+
+
l = L(list(concat(np.zeros(5), np.ones(10)).astype(int)))
+balanced_idx = balance_idx(l)
+test_eq(np.mean(l[balanced_idx]), 0.5)
+test_eq(isinstance(balanced_idx, L), True)
+
+l = list(concat(np.zeros(5), np.ones(10)).astype(int))
+balanced_idx = balance_idx(l)
+test_eq(np.mean(L(l)[balanced_idx]), 0.5)
+test_eq(isinstance(balanced_idx, L), True)
+
+a = concat(np.zeros(5), np.ones(10)).astype(int)
+balanced_idx = balance_idx(a)
+test_eq(np.mean(a[balanced_idx]), 0.5)
+test_eq(isinstance(balanced_idx, L), True)
+
+t = concat(torch.zeros(5), torch.ones(10))
+balanced_idx = balance_idx(t, shuffle=True)
+test_eq(t[balanced_idx].mean(), 0.5)
+test_eq(isinstance(balanced_idx, L), True)
+
+
+
a, b = np.arange(100_000), np.arange(100_000, 200_000)
+
+
+
soft_labels = True
+filter_pseudolabels = .5
+balanced_pseudolabels = True
+
+pseudolabels = torch.rand(1000, 3)
+pseudolabels = torch.softmax(pseudolabels, -1) if soft_labels else torch.argmax(pseudolabels, -1)
+hpl = torch.argmax(pseudolabels, -1) if soft_labels else pseudolabels
+
+if filter_pseudolabels and pseudolabels.ndim > 1: 
+    error = 1 - pseudolabels.max(-1).values
+    filt_pl_idx = np.arange(len(error))[error < filter_pseudolabels]
+    filt_pl = pseudolabels[error < filter_pseudolabels]
+    assert len(filt_pl) > 0, 'no filtered pseudolabels'
+    filt_hpl = torch.argmax(filt_pl, -1)
+else: 
+    filt_pl_idx = np.arange(len(pseudolabels))
+    filt_pl = filt_hpl = pseudolabels
+
+
+
pl_split = filt_pl_idx[balance_idx(filt_hpl)] if balanced_pseudolabels else filt_pl_idx
+test_eq(hpl[pl_split].float().mean(), np.mean(np.unique(hpl)))
+
+
+

source

+
+
+

TrainValidTestSplitter

+
+
 TrainValidTestSplitter (n_splits:int=1, valid_size:Union[float,int]=0.2,
+                         test_size:Union[float,int]=0.0,
+                         train_only:bool=False, stratify:bool=True,
+                         balance:bool=False, strategy:str='oversample',
+                         shuffle:bool=True,
+                         random_state:Optional[int]=None,
+                         verbose:bool=False, **kwargs)
+
+

Split items into random train, valid (and optionally test) subsets.

+
+

source

+
+
+

plot_splits

+
+
 plot_splits (splits)
+
+
+

source

+
+
+

get_splits

+
+
 get_splits (o, n_splits:int=1, valid_size:float=0.2, test_size:float=0.0,
+             train_only:bool=False,
+             train_size:Union[NoneType,float,int]=None,
+             balance:bool=False, strategy:str='oversample',
+             shuffle:bool=True, stratify:bool=True,
+             check_splits:bool=True, random_state:Optional[int]=None,
+             show_plot:bool=True, verbose:bool=False)
+
+

Arguments:
    o             : object to which splits will be applied, usually target.
    n_splits      : number of folds. Must be an int >= 1.
    valid_size    : size of validation set. Only used if n_splits = 1. If n_splits > 1, valid_size = (1. - test_size) / n_splits.
    test_size     : size of test set. Default = 0.
    train_only    : if True, valid set == train set. This may be useful for debugging purposes.
    train_size    : size of the train set used. Default = None (the remainder after assigning both valid and test). Useful to get learning curves with different train sizes, or to get a small batch to debug a neural net.
    balance       : whether to balance data so that train always contains the same number of items per class.
    strategy      : strategy to balance data ("undersample" or "oversample"). Default = "oversample".
    shuffle       : whether to shuffle data before splitting into batches. Note that the samples within each split will be shuffled.
    stratify      : whether to create folds preserving the percentage of samples for each class.
    check_splits  : whether to perform leakage and completion checks.
    random_state  : when shuffle is True, random_state affects the ordering of the indices. Pass an int for reproducible output.
    show_plot     : plot the split distribution.

+
+
n_splits                = 5
+valid_size              = 0.2
+test_size               = 0.2
+train_only              = False  # set to True for debugging (valid = train)
+train_size              = 5000
+stratify                = True
+balance                 = False
+shuffle                 = True
+predefined_splits       = None
+show_plot               = True 
+
+
+check_splits = True
+random_state = 23
+
+y = np.random.randint(0, 3, 10000) + 100
+
+splits = get_splits(y, n_splits=n_splits, valid_size=valid_size, test_size=test_size, shuffle=shuffle, balance=balance, stratify=stratify,
+                    train_only=train_only, train_size=train_size, check_splits=check_splits, random_state=random_state, show_plot=show_plot, verbose=True)
+splits
+
+
+
+

+
+
+
+
+
(((#5000) [3490,2428,4475,8317,2802,6834,2954,7671,3383,9554...],
+  (#1600) [1680,6677,5879,4428,5511,8312,372,5127,7012,3021...],
+  (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]),
+ ((#5000) [3442,4237,470,3901,3808,3793,6286,8546,6254,9530...],
+  (#1600) [9160,5451,3628,143,2054,7225,7124,8057,1405,5089...],
+  (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]),
+ ((#5000) [9850,7451,7338,9742,3258,1527,4450,5678,2932,1693...],
+  (#1600) [6186,5970,376,7848,3786,1663,7193,3647,3277,553...],
+  (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]),
+ ((#5000) [1853,7308,7375,3851,1852,3820,2601,3868,8718,7190...],
+  (#1600) [4182,6419,6265,4837,168,9627,2500,9951,1610,7547...],
+  (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]),
+ ((#5000) [7878,6392,453,4817,4676,5738,6482,4033,8114,7337...],
+  (#1600) [7682,6416,2877,9164,1583,342,2916,4806,8776,2046...],
+  (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]))
+
+
+
+
train_size=256
+y = np.random.randint(0, 3, 1000) + 100
+splits = get_splits(y, train_size=train_size, train_only=True)
+test_eq(splits[0], splits[1])
+test_eq(len(splits[0]), train_size)
+splits
+
+
valid == train
+
+
+
+
+

+
+
+
+
+
((#256) [550,813,388,595,948,198,354,749,175,812...],
+ (#256) [550,813,388,595,948,198,354,749,175,812...])
+
+
+
+

source

+
+
+

get_walk_forward_splits

+
+
 get_walk_forward_splits (o, n_splits=1, train_size=None, valid_size=0.2,
+                          test_size=0.0, anchored=False, gap=0.0,
+                          test_after_valid=True, random_state=None,
+                          show_plot=True)
+
o: 3D object with shape [samples x features x steps] containing the time series we need to split
n_splits (int, default=1): # of splits
train_size (NoneType, default=None): optional training set size as an int or a float. None when using an anchored strategy.
valid_size (float, default=0.2): validation set size as an int or a float
test_size (float, default=0.0): test set size as an int or a float
anchored (bool, default=False): starting point for train set remains the same for all splits
gap (float, default=0.0): # of samples to exclude from the end of each train set before the validation set. Entered as an int or a float
test_after_valid (bool, default=True): flag to indicate if validation and test will be sampled randomly or sequentially
random_state (NoneType, default=None): integer that can be used to generate reproducible results
show_plot (bool, default=True): plots the splits created
+
+
o = np.random.rand(10_000, 3,  50) # shape: [samples x features x steps]
+
+splits = get_walk_forward_splits(
+    o, 
+    n_splits=4, 
+    train_size=.6,
+    valid_size=0.1, 
+    test_size=0.1, 
+    anchored = True,
+    gap = 100,
+    test_after_valid = True,
+    random_state = None,
+    show_plot=True,
+)
+
+splits = get_walk_forward_splits(
+    o, 
+    n_splits=3, 
+    train_size=0.3,
+    valid_size=0.1, 
+    test_size=0.1, 
+    anchored = False,
+    gap = 0.,
+    test_after_valid = False,
+    random_state = None,
+    show_plot=True,
+)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+

source

+
+
+

TSSplitter

+
+
 TSSplitter (valid_size=0.2, test_size=0.0, fcst_horizon=0,
+             show_plot=True)
+
+

Create function that splits items between train/val with valid_size without shuffling data.

valid_size (float, default=0.2): int or float indicating the validation set size
test_size (float, default=0.0): int or float indicating the test set size
fcst_horizon (int, default=0): int that indicates the number of time steps removed at the end of train (and validation)
show_plot (bool, default=True): flag that indicates if a plot showing the splits will be created
+
+
y = np.arange(1000) + 100
+test_eq(TimeSplitter(valid_size=0.2)(y)[1], L(np.arange(800, 1000).tolist()))
+test_eq(TimeSplitter(valid_size=0.2)(y)[0], TimeSplitter(valid_size=200)(y)[0])
+TimeSplitter(valid_size=0.2, show_plot=True)(y)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
((#800) [0,1,2,3,4,5,6,7,8,9...],
+ (#200) [800,801,802,803,804,805,806,807,808,809...])
+
+
+
+
n_splits                = 5
+valid_size              = 0.2  
+test_size               = 0
+train_only              = False  # set to True for debugging (valid = train)
+train_size              = None
+stratify                = True
+balance                 = True
+shuffle                 = True
+predefined_splits       = None
+show_plot               = True 
+
+
+check_splits = True
+random_state = 23
+
+splits = get_splits(y, n_splits=n_splits, valid_size=valid_size, test_size=test_size, shuffle=shuffle, balance=balance, stratify=stratify,
+                    train_only=train_only, train_size=train_size, check_splits=check_splits, random_state=random_state, show_plot=show_plot, verbose=True)
+split = splits[0] if n_splits == 1 else splits[0][0]
+y[split].mean(), split
+
+
stratify set to False as n_splits=5 cannot be greater than the min number of members in each class (1).
+
+
+
+
+

+
+
+
+
+
+
list([splits[0], splits[1], splits[2], splits[3], splits[4]])
+
+
[((#800) [314,194,782,789,502,917,137,415,904,181...],
+  (#200) [362,151,934,378,95,597,500,117,980,844...]),
+ ((#800) [312,198,777,788,515,910,145,413,898,186...],
+  (#200) [352,133,955,396,64,596,442,79,991,882...]),
+ ((#800) [311,197,783,791,507,922,145,416,908,184...],
+  (#200) [338,125,912,361,54,594,486,88,994,859...]),
+ ((#800) [296,181,782,789,493,917,130,401,905,165...],
+  (#200) [405,199,953,444,113,610,515,137,997,881...]),
+ ((#800) [320,190,782,788,506,906,141,412,893,178...],
+  (#200) [336,149,942,358,49,582,472,70,990,907...])]
+
+
+
+
n_splits = 5
+valid_size = 0.
+test_size = 0.
+shuffle = True
+stratify = True
+train_only = True
+train_size = None
+check_splits = True
+random_state = 1
+show_plot = True 
+
+splits = get_splits(y, n_splits=n_splits, valid_size=valid_size, test_size=test_size, shuffle=shuffle, stratify=stratify,
+                    train_only=train_only, train_size=train_size, check_splits=check_splits, random_state=random_state, show_plot=show_plot, verbose=True)
+for split in splits: 
+    test_eq(len(split[0]), len(y))
+    test_eq(np.sort(split[0]), np.arange(len(y)))
+
+
stratify set to False as n_splits=5 cannot be greater than the min number of members in each class (1).
+valid == train
+
+
+
+
+

+
+
+
+
+
+
n_splits = 5
+y = np.random.randint(0, 2, 1000)
+
+splits = get_splits(y, n_splits=n_splits, shuffle=False, check_splits=True)
+test_eq(np.concatenate((L(zip(*splits))[1])), np.arange(len(y)))
+
+splits = get_splits(y, n_splits=n_splits, shuffle=True, check_splits=True)
+test_eq(np.sort(np.concatenate((L(zip(*splits))[1]))), np.arange(len(y)))
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
n_splits = 2
+y = np.random.randint(0, 2, 1000)
+
+splits = get_splits(y, n_splits=n_splits, test_size=0.2, shuffle=False)
+for i in range(n_splits): leakage_finder(*splits[i])
+test_eq(len(splits), n_splits)
+test_eq(len(splits[0]), 3)
+s = []
+[s.extend(split) for split in splits[0]]
+test_eq(np.sort(s), np.arange(len(y)))
+s = []
+[s.extend(split) for split in splits[1]]
+test_eq(np.sort(s), np.arange(len(y)))
+
+
+
+

+
+
+
+
+
+
y = np.random.randint(0, 2, 1000)
+splits1 = get_splits(y, valid_size=.25, test_size=0, random_state=23, stratify=True, shuffle=True)
+splits2 = get_splits(y, valid_size=.25, test_size=0, random_state=23, stratify=True, shuffle=True)
+splits3 = get_splits(y, valid_size=.25, test_size=0, random_state=None, stratify=True, shuffle=True)
+splits4 = get_splits(y, valid_size=.25, test_size=0, random_state=None, stratify=True, shuffle=True)
+test_eq(splits1[0], splits2[0])
+test_ne(splits3[0], splits4[0])
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
y = np.random.randint(0, 2, 100)
+splits = get_splits(y, valid_size=.25, test_size=0, random_state=23, stratify=True, shuffle=True)
+test_eq(len(splits), 2)
+
+
+
+

+
+
+
+
+
+
y = np.random.randint(0, 2, 100)
+splits = get_splits(y, valid_size=.25, test_size=0, random_state=23, stratify=True)
+test_eq(len(splits), 2)
+
+
+
+

+
+
+
+
+
+
y = np.random.randint(0, 2, 100)
+splits = get_splits(y, valid_size=.25, test_size=20, random_state=23, stratify=True)
+test_eq(len(splits), 3)
+leakage_finder(*splits)
+
+
+
+

+
+
+
+
+
+
splits = TrainValidTestSplitter(valid_size=.25, test_size=20, random_state=23, stratify=True)(np.random.randint(0, 2, 100))
+test_eq(len(splits[1]), 25)
+test_eq(len(splits[2]), 20)
+
+
+
o = np.random.randint(0, 2, 1000)
+for p in [1, .75, .5, .25, .125]:
+    splits = get_splits(o, train_size=p)
+    test_eq(len(splits[0]), len(o) * .8 * p)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
y = L([0] * 50 + [1] * 25 + [2] * 15 + [3] * 10)
+splits = get_splits(y, valid_size=.2, test_size=.2)
+test_eq(np.mean(y[splits[0]])==np.mean(y[splits[1]])==np.mean(y[splits[2]]), True)
+splits
+
+
+
+

+
+
+
+
+
((#60) [58,95,53,44,28,69,9,12,22,88...],
+ (#20) [89,71,60,4,19,37,75,13,46,30...],
+ (#20) [76,68,74,29,16,97,14,21,90,82...])
+
+
+
+
y = L([0] * 50 + [1] * 25 + [2] * 15 + [3] * 10)
+splits = get_splits(y, n_splits=1, valid_size=.2, test_size=.2, shuffle=False)
+# test_eq(splits[0] + splits[1] + splits[2], np.arange(100))
+splits
+
+
+
+

+
+
+
+
+
((#60) [0,1,2,3,4,5,6,7,8,9...],
+ (#20) [60,61,62,63,64,65,66,67,68,69...],
+ (#20) [80,81,82,83,84,85,86,87,88,89...])
+
+
+
+
splits = get_splits(np.random.randint(0,5,100), valid_size=0.213, test_size=17)
+test_eq(len(splits[1]), 21)
+test_eq(len(splits[2]), 17)
+
+
+
+

+
+
+
+
+
+
splits = get_splits(np.random.randint(0,5,100), valid_size=0.213, test_size=17, train_size=.2)
+splits
+
+
+
+

+
+
+
+
+
((#12) [37,38,62,60,16,22,95,44,94,98...],
+ (#21) [88,93,5,31,57,23,90,18,15,40...],
+ (#17) [4,86,47,33,59,52,99,48,70,3...])
+
+
+
+

source

+
+
+

combine_split_data

+
+
 combine_split_data (xs, ys=None)
+
+

xs is a list with X_train, X_valid, …. ys is None or a list with y_train, y_valid, ….

+
+

source

+
+
+

get_predefined_splits

+
+
 get_predefined_splits (*xs)
+
+

xs is a list with X_train, X_valid, …

+
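Per the docstring, get_predefined_splits takes already-split arrays and returns the index splits you would use alongside the combined data. A minimal hedged sketch (the shapes and the pairing with combine_split_data are assumptions based on the docstrings on this page):

# Hedged sketch: build index splits from pre-split arrays
X_train, X_valid = np.random.rand(8, 3, 20), np.random.rand(4, 3, 20)
splits = get_predefined_splits(X_train, X_valid)         # assumed: consecutive train/valid index lists
X, y, _splits = combine_split_data([X_train, X_valid])   # X concatenates both arrays, aligned with splits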
+

source

+
+
+

get_splits_len

+
+
 get_splits_len (splits)
+
+
+
X_train, y_train, X_valid, y_valid = np.random.rand(3,3,4), np.random.randint(0,2,3), np.random.rand(2,3,4), np.random.randint(0,2,2)
+X, y, splits = combine_split_data([X_train, X_valid], [y_train, y_valid])
+test_eq(X_train, X[splits[0]])
+test_eq(X_valid, X[splits[1]])
+test_type(X_train, X)
+test_type(y_train, y)
+
+
+
X_train, y_train, X_valid, y_valid = np.random.rand(3,4), np.random.randint(0,2,3), np.random.rand(2,4), np.random.randint(0,2,2)
+X, y, splits = combine_split_data([X_train, X_valid], [y_train, y_valid])
+test_eq(X_train[:, None], X[splits[0]])
+test_eq(X_valid[:, None], X[splits[1]])
+test_type(X_train, X)
+test_type(y_train, y)
+
+
+
+

Forecasting

+
+

source

+
+

get_df_usable_idxs

+
+
 get_df_usable_idxs (df, fcst_history, fcst_horizon, stride=1,
+                     unique_id_cols=None, return_np_indices=False)
+
+

Calculates the indices that can be used from a df when using a sliding window

df: dataframe containing a sorted time series
fcst_history: # historical steps used as input (size of the sliding window for the input)
fcst_horizon: # steps forecasted into the future (size of the sliding window for the target)
stride (int, default=1): int or tuple of 2 int containing the strides of the sliding windows (input and target)
unique_id_cols (NoneType, default=None): str indicating the column/s with the unique identifier/s for each entity
return_np_indices (bool, default=False): bool indicating what type of indices are returned. Defaults to False (dataframe indices)
+
+

source

+
+
+

get_usable_idxs

+
+
 get_usable_idxs (df, fcst_history, fcst_horizon, stride=1)
+
+
+

source

+
+
+

calculate_fcst_stats

+
+
 calculate_fcst_stats (df, fcst_history, fcst_horizon, splits,
+                       x_vars=None, y_vars=None, subset_size=None)
+
+

Calculates the training stats required in a forecasting task

df: dataframe containing a sorted time series for a single entity or subject
fcst_history: # historical steps used as input
fcst_horizon: # steps forecasted into the future
splits: splits that will be used to train the model. splits[0] is the train split.
x_vars (NoneType, default=None): features used as input
y_vars (NoneType, default=None): features used as output
subset_size (NoneType, default=None): int or float to determine the number of train samples used to calculate the mean and std
+
+

source

+
+
+

get_forecasting_splits

+
+
 get_forecasting_splits (df, fcst_history, fcst_horizon, stride=1,
+                         valid_size=0.0, test_size=0.2,
+                         valid_cutoff_datetime=None,
+                         test_cutoff_datetime=None, datetime_col=None,
+                         use_index=False, unique_id_cols=None,
+                         show_plot=True)
+
df: dataframe containing a sorted time series
fcst_history: # historical steps used as input (size of the sliding window for the input)
fcst_horizon: # steps forecasted into the future (size of the sliding window for the target)
stride (int, default=1): int or tuple of 2 int containing the strides of the sliding windows (input and target)
valid_size (float, default=0.0): int or float indicating the size of the validation set (based on datetimes)
test_size (float, default=0.2): int or float indicating the size of the test set (based on datetimes)
valid_cutoff_datetime (NoneType, default=None): first prediction datetime of validation dataset
test_cutoff_datetime (NoneType, default=None): first prediction datetime of test dataset
datetime_col (NoneType, default=None): str indicating the column with the datetime values
use_index (bool, default=False): flag to indicate if the datetime is in the index
unique_id_cols (NoneType, default=None): str indicating the column/s with the unique identifier/s for each entity
show_plot (bool, default=True): flag to indicate if splits should be plotted
+
+
df1_len = 100
+df2_len = 80
+
+datetime_col = 'datetime' 
+df1 = pd.DataFrame(np.arange(df1_len), columns=['value'])
+df1['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df1_len, freq='1D')
+df1['type'] = 1
+
+df = df1
+display(df)
+
+# settings
+fcst_history          = 10
+fcst_horizon          = 1
+stride                = 1
+unique_id_cols        = 'type'
+datetime_col          = 'datetime' 
+use_index             = False
+valid_size            = 0.1  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+test_size             = 0.2  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+valid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset
+test_cutoff_datetime  = '1749-12-24' # first prediction datetime of test dataset
+valid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset
+test_cutoff_datetime  = None # datetime compatible with the datetime_col containing the starting date for the test dataset
+
+
+splits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, 
+                                unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,
+                                valid_size=valid_size, test_size=test_size, 
+                                valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)
+
+print(f"splits size   : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})")
+
+# settings
+fcst_history          = 10
+fcst_horizon          = 5
+stride                = 5
+unique_id_cols        = 'type'
+datetime_col          = 'datetime' 
+use_index             = False
+valid_size            = 0.1  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+test_size             = 0.2  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+valid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset
+test_cutoff_datetime  = '1749-12-24' # first prediction datetime of test dataset
+valid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset
+test_cutoff_datetime  = None # datetime compatible with the datetime_col containing the starting date for the test dataset
+
+
+splits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, 
+                                unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,
+                                valid_size=valid_size, test_size=test_size, 
+                                valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)
+
+print(f"splits size   : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})")
+
+
        value    datetime  type
0           0  1749-03-31     1
1           1  1749-04-01     1
2           2  1749-04-02     1
3           3  1749-04-03     1
4           4  1749-04-04     1
...       ...         ...   ...
95         95  1749-07-04     1
96         96  1749-07-05     1
97         97  1749-07-06     1
98         98  1749-07-07     1
99         99  1749-07-08     1

100 rows × 3 columns

+
+
+
+
+
+

+
+
+
+
+
splits size   : [63, 9, 18] (90: [0.7, 0.1, 0.2])
+splits size   : [12, 2, 4] (18: [0.67, 0.11, 0.22])
+
+
+
+
+

+
+
+
+
+
+
df1_len = 100
+df2_len = 80
+
+datetime_col = 'datetime' 
+df1 = pd.DataFrame(np.arange(df1_len), columns=['value'])
+df1['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df1_len, freq='1D')
+df1['type'] = 1
+df1_index = df1.set_index("datetime")
+
+df = df1_index
+display(df)
+
+# settings
+fcst_history          = 10
+fcst_horizon          = 1
+stride                = 1
+unique_id_cols        = 'type'
+datetime_col          = 'datetime' 
+use_index             = True
+valid_size            = 0.1  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+test_size             = 0.2  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+valid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset
+test_cutoff_datetime  = '1749-12-24' # first prediction datetime of test dataset
+valid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset
+test_cutoff_datetime  = None # datetime compatible with the datetime_col containing the starting date for the test dataset
+
+
+splits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, 
+                                unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,
+                                valid_size=valid_size, test_size=test_size, 
+                                valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)
+
+print(f"splits size   : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})")
+
+# settings
+fcst_history          = 10
+fcst_horizon          = 5
+stride                = 5
+unique_id_cols        = 'type'
+datetime_col          = 'datetime' 
+use_index             = True
+valid_size            = 0.1  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+test_size             = 0.2  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+valid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset
+test_cutoff_datetime  = '1749-12-24' # first prediction datetime of test dataset
+valid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset
+test_cutoff_datetime  = None # datetime compatible with the datetime_col containing the starting date for the test dataset
+
+
+splits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, 
+                                unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,
+                                valid_size=valid_size, test_size=test_size, 
+                                valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)
+
+print(f"splits size   : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})")
+
+
            value  type
datetime
1749-03-31      0     1
1749-04-01      1     1
1749-04-02      2     1
1749-04-03      3     1
1749-04-04      4     1
...           ...   ...
1749-07-04     95     1
1749-07-05     96     1
1749-07-06     97     1
1749-07-07     98     1
1749-07-08     99     1

100 rows × 2 columns

+
+
+
+
+
+

+
+
+
+
+
splits size   : [63, 9, 18] (90: [0.7, 0.1, 0.2])
+splits size   : [12, 2, 4] (18: [0.67, 0.11, 0.22])
+
+
+
+
+

+
+
+
+
+
+
df1_len = 100
+df2_len = 80
+
+datetime_col = 'datetime' 
+df1 = pd.DataFrame(np.arange(df1_len), columns=['value'])
+df1['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df1_len, freq='1D')
+df1['type'] = 1
+df1_index = df1.set_index("datetime")
+df2 = pd.DataFrame(np.arange(df2_len) * 10, columns=['value'])
+df2['datetime'] = pd.date_range(pd.to_datetime('1749-04-15'), periods=df2_len, freq='1D')
+df2['type'] = 2
+df_comb = pd.concat([df1, df2]).reset_index(drop=True)
+
+
+df = df_comb
+display(df)
+
+# settings
+fcst_history          = 10
+fcst_horizon          = 3
+stride                = 1
+unique_id_cols        = 'type'
+datetime_col          = 'datetime' 
+use_index             = False
+valid_size            = 0.1  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+test_size             = 0.2  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+valid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset
+test_cutoff_datetime  = '1749-12-24' # first prediction datetime of test dataset
+valid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset
+test_cutoff_datetime  = None # datetime compatible with the datetime_col containing the starting date for the test dataset
+
+
+splits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, 
+                                unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,
+                                valid_size=valid_size, test_size=test_size, 
+                                valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)
+
+print(f"splits size   : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})")
+
+
        value    datetime  type
0           0  1749-03-31     1
1           1  1749-04-01     1
2           2  1749-04-02     1
3           3  1749-04-03     1
4           4  1749-04-04     1
...       ...         ...   ...
175       750  1749-06-29     2
176       760  1749-06-30     2
177       770  1749-07-01     2
178       780  1749-07-02     2
179       790  1749-07-03     2

180 rows × 3 columns

+
+
+
+
+
+

+
+
+
+
+
splits size   : [101, 16, 31] (148: [0.68, 0.11, 0.21])
+
+
+
+
df1_len = 100
+df2_len = 80
+
+datetime_col = 'datetime' 
+df1 = pd.DataFrame(np.arange(df1_len), columns=['value'])
+df1['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df1_len, freq='1D')
+df1['type'] = 1
+df1_index = df1.set_index("datetime")
+df2 = pd.DataFrame(np.arange(df2_len) * 10, columns=['value'])
+df2['datetime'] = pd.date_range(pd.to_datetime('1749-04-15'), periods=df2_len, freq='1D')
+df2['type'] = 2
+df_comb = pd.concat([df1, df2]).reset_index(drop=True)
+df_comb_index = df_comb.set_index("datetime")
+df_comb_index.index.name = None
+
+
+df = df_comb_index
+display(df)
+
+# settings
+fcst_history          = 15
+fcst_horizon          = 5
+stride                = 1
+unique_id_cols        = 'type'
+datetime_col          = 'datetime' 
+use_index             = True
+valid_size            = 0.1  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+test_size             = 0.2  # a percent (float) or a number of samples (int) - .1 means 10% of the dates
+valid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset
+test_cutoff_datetime  = '1749-12-24' # first prediction datetime of test dataset
+valid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset
+test_cutoff_datetime  = None # datetime compatible with the datetime_col containing the starting date for the test dataset
+
+
+splits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, 
+                                unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,
+                                valid_size=valid_size, test_size=test_size, 
+                                valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)
+
+print(f"splits size   : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})")
+
+
            value  type
1749-03-31      0     1
1749-04-01      1     1
1749-04-02      2     1
1749-04-03      3     1
1749-04-04      4     1
...           ...   ...
1749-06-29    750     2
1749-06-30    760     2
1749-07-01    770     2
1749-07-02    780     2
1749-07-03    790     2

180 rows × 2 columns

+
+
+
+
+
+

+
+
+
+
+
splits size   : [83, 14, 29] (126: [0.66, 0.11, 0.23])
+
+
+
+

source

+
+
+

get_long_term_forecasting_splits

+
+
 get_long_term_forecasting_splits (df, fcst_history, fcst_horizon,
+                                   dsid=None, show_plot=True)
+
+

Returns the train, valid and test splits for long-range time series datasets

              Type      Default  Details
df                               dataframe containing a sorted time series for a single entity or subject
fcst_history                     # historical steps used as input
fcst_horizon                     # steps forecasted into the future
dsid          NoneType  None     dataset name
show_plot     bool      True     plot the splits
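
A hypothetical (not executed) usage sketch, reusing a single-entity dataframe like df1 from the examples above; the printed split lengths are not recorded output.

splits = get_long_term_forecasting_splits(df1, fcst_history=10, fcst_horizon=5, show_plot=False)
+print([len(s) for s in splits])  # number of train / valid / test windows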
+ + +
+
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/export.html b/export.html new file mode 100644 index 000000000..3a99cdc43 --- /dev/null +++ b/export.html @@ -0,0 +1,1365 @@ + + + + + + + + + +tsai - nb2py + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

nb2py

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

nb2py will allow you to convert the notebook (.ipynb) where the function is executed to a python script.

+
+

The conversion applies these rules:

+ +

This code is required to identify flags in the notebook. We are looking for #hide flags.

+

This code automatically gets the name of the notebook. It’s been tested to work on Jupyter notebooks, Jupyter Lab and Google Colab.

+
+

source

+
+

get_script_path

+
+
 get_script_path (nb_name=None)
+
+
+

source

+
+
+

nb_name_to_py

+
+
 nb_name_to_py (nb_name)
+
+
+

source

+
+
+

get_nb_path

+
+
 get_nb_path ()
+
+

Returns the absolute path of the notebook, or raises a FileNotFoundError exception if it cannot be determined.

+
+

source

+
+
+

get_colab_nb_name

+
+
 get_colab_nb_name ()
+
+
+

source

+
+
+

get_nb_name

+
+
 get_nb_name (d=None)
+
+

Returns the short name of the notebook w/o the .ipynb extension, or raises a FileNotFoundError exception if it cannot be determined.

+

This code is used when trying to save a file to google drive. We first need to mount the drive.

+
+

source

+
+
+

nb2py

+
+
 nb2py (nb:str=None,       # absolute or relative full path to the notebook you want to convert to a python script
+        folder:str=None,   # absolute or relative path to the folder of the script you will create. Defaults to the current nb's directory
+        name:str=None,     # name of the script you want to create. Defaults to the current nb name, replacing .ipynb by .py
+        save=True,         # saves the nb before converting it to a script
+        run=False,         # imports and runs the script
+        verbose=True)      # controls verbosity
+
+

Converts a notebook to a python script in a predefined folder.

+
+
if not is_colab():
+    nb = None
+    folder = None
+    name = None
+    pyname = nb2py(nb=nb, folder=folder, name=name)
+    if pyname is not None: 
+        assert os.path.isfile(pyname)
+        os.remove(pyname)
+        assert not os.path.isfile(pyname)
+
+    nb = '001_export.ipynb'
+    folder = None
+    name = None
+    pyname = nb2py(nb=nb, folder=folder, name=name)
+    if pyname is not None: 
+        assert os.path.isfile(pyname)
+        os.remove(pyname)
+        assert not os.path.isfile(pyname)
+
+    nb = '../nbs/001_export'
+    folder = None
+    name = None
+    pyname = nb2py(nb=nb, folder=folder, name=name)
+    if pyname is not None: 
+        assert os.path.isfile(pyname)
+        os.remove(pyname)
+        assert not os.path.isfile(pyname)
+
+    nb = None
+    folder = '../test_export/'
+    name = None
+    pyname = nb2py(nb=nb, folder=folder, name=name)
+    if pyname is not None: 
+        assert os.path.isfile(pyname)
+        shutil.rmtree(folder)
+        assert not os.path.isfile(pyname)
+
+
nb2py couldn't get the nb name. Pass it as an nb argument and rerun nb2py.
+001_export.ipynb converted to /Users/nacho/notebooks/tsai/nbs/001_export.py
+001_export.ipynb converted to /Users/nacho/notebooks/tsai/nbs/../nbs/001_export.py
+nb2py couldn't get the nb name. Pass it as an nb argument and rerun nb2py.
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 000000000..3d3f07488 --- /dev/null +++ b/index.html @@ -0,0 +1,1517 @@ + + + + + + + + + +tsai + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

tsai

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

+
+



+

CI PyPI Conda (channel only) DOI PRs

+
+

Description

+
+

State-of-the-art Deep Learning library for Time Series and Sequences.

+
+

tsai is an open-source deep learning package built on top of Pytorch & fastai focused on state-of-the-art techniques for time series tasks like classification, regression, forecasting, imputation…

+

tsai is currently under active development by timeseriesAI.

+
+
+

What’s new:

+

During the last few releases, here are some of the most significant additions to tsai:

+
    +
  • New models: PatchTST (Accepted by ICLR 2023), RNN with Attention (RNNAttention, LSTMAttention, GRUAttention), TabFusionTransformer, …
  • +
  • New datasets: we have increased the number of datasets you can download using tsai: +
      +
    • 128 univariate classification datasets
    • +
    • 30 multivariate classification datasets
    • +
    • 15 regression datasets
    • +
    • 62 forecasting datasets
    • +
    • 9 long term forecasting datasets
    • +
  • +
  • New tutorials: PatchTST. Based on some of your requests, we are planning to release additional tutorials on data preparation and forecasting.
  • +
  • New functionality: sklearn-type pipeline transforms, walk-forward cross validation, reduced RAM requirements, and many new features to perform more accurate time series forecasts.
  • +
  • Pytorch 2.0 support.
  • +
+
+
+

Installation

+
+

Pip install

+

You can install the latest stable version from pip using:

+
pip install tsai
+

If you plan to develop tsai yourself, or want to be on the cutting edge, you can use an editable install. First install PyTorch, and then:

+
git clone https://github.com/timeseriesAI/tsai
+pip install -e "tsai[dev]"
+

Note: starting with tsai 0.3.0, tsai will only install hard dependencies. Other soft dependencies (which are only required for selected tasks) will not be installed by default. This is the recommended approach: if you need a dependency that is not installed, tsai will ask you to install it when necessary. If you still want to install tsai with all its dependencies, you can do it by running:

+
pip install tsai[extras]
+
+
+

Conda install

+

You can also install tsai using conda (note that if you replace conda with mamba the install process will be much faster and more reliable):

+
conda install -c timeseriesai tsai
+
+
+
+

Documentation

+

Here’s the link to the documentation.

+
+
+

Available models:

+

Here’s a list with some of the state-of-the-art models available in tsai:

+ +

plus other custom models like: TransformerModel, LSTMAttention, GRUAttention, …

+
+
+

How to start using tsai?

+

To get to know the tsai package, we’d suggest you start with this notebook in Google Colab: 01_Intro_to_Time_Series_Classification. It provides an overview of a time series classification task.

+

We have also developed many other tutorial notebooks.

+

To use tsai in your own notebooks, the only thing you need to do after you have installed the package is to run this:

+
from tsai.all import *
+
+
+

Examples

+

These are just a few examples of how you can use tsai:

+
+

Binary, univariate classification

+

Training:

+
from tsai.basics import *
+
+X, y, splits = get_classification_data('ECG200', split_data=False)
+tfms = [None, TSClassification()]
+batch_tfms = TSStandardize()
+clf = TSClassifier(X, y, splits=splits, path='models', arch="InceptionTimePlus", tfms=tfms, batch_tfms=batch_tfms, metrics=accuracy, cbs=ShowGraph())
+clf.fit_one_cycle(100, 3e-4)
+clf.export("clf.pkl") 
+

Inference:

+
from tsai.inference import load_learner
+
+clf = load_learner("models/clf.pkl")
+probas, target, preds = clf.get_X_preds(X[splits[1]], y[splits[1]])
+
+
+

Multi-class, multivariate classification

+

Training:

+
from tsai.basics import *
+
+X, y, splits = get_classification_data('LSST', split_data=False)
+tfms = [None, TSClassification()]
+batch_tfms = TSStandardize(by_sample=True)
+mv_clf = TSClassifier(X, y, splits=splits, path='models', arch="InceptionTimePlus", tfms=tfms, batch_tfms=batch_tfms, metrics=accuracy, cbs=ShowGraph())
+mv_clf.fit_one_cycle(10, 1e-2)
+mv_clf.export("mv_clf.pkl")
+

Inference:

+
from tsai.inference import load_learner
+
+mv_clf = load_learner("models/mv_clf.pkl")
+probas, target, preds = mv_clf.get_X_preds(X[splits[1]], y[splits[1]])
+
+
+

Multivariate Regression

+

Training:

+
from tsai.basics import *
+
+X, y, splits = get_regression_data('AppliancesEnergy', split_data=False)
+tfms = [None, TSRegression()]
+batch_tfms = TSStandardize(by_sample=True)
+reg = TSRegressor(X, y, splits=splits, path='models', arch="TSTPlus", tfms=tfms, batch_tfms=batch_tfms, metrics=rmse, cbs=ShowGraph(), verbose=True)
+reg.fit_one_cycle(100, 3e-4)
+reg.export("reg.pkl")
+

Inference:

+
from tsai.inference import load_learner
+
+reg = load_learner("models/reg.pkl")
+raw_preds, target, preds = reg.get_X_preds(X[splits[1]], y[splits[1]])
+

The ROCKETs (RocketClassifier, RocketRegressor, MiniRocketClassifier, MiniRocketRegressor, MiniRocketVotingClassifier or MiniRocketVotingRegressor) are somewhat different models. They are not actually deep learning models (although they use convolutions) and are used in a different way.

+

⚠️ You’ll also need to install sktime to be able to use them. You can install it separately:

+
pip install sktime
+

or use:

+
pip install tsai[extras]
+

Training:

+
from sklearn.metrics import mean_squared_error, make_scorer
+from tsai.data.external import get_Monash_regression_data
+from tsai.models.MINIROCKET import MiniRocketRegressor
+
+X_train, y_train, *_ = get_Monash_regression_data('AppliancesEnergy')
+rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
+reg = MiniRocketRegressor(scoring=rmse_scorer)
+reg.fit(X_train, y_train)
+reg.save('MiniRocketRegressor')
+

Inference:

+
from sklearn.metrics import mean_squared_error
+from tsai.data.external import get_Monash_regression_data
+from tsai.models.MINIROCKET import load_minirocket
+
+*_, X_test, y_test = get_Monash_regression_data('AppliancesEnergy')
+reg = load_minirocket('MiniRocketRegressor')
+y_pred = reg.predict(X_test)
+mean_squared_error(y_test, y_pred, squared=False)
+
+
+

Forecasting

+

You can use tsai for forecasting in the following scenarios:

+
    +
  • univariate or multivariate time series input
  • +
  • univariate or multivariate time series output
  • +
  • single or multi-step ahead
  • +
+

You’ll need to:

  • prepare X (time series input) and the target y (see documentation)
  • select PatchTST or one of tsai’s models ending in Plus (TSTPlus, InceptionTimePlus, TSiTPlus, etc). The model will auto-configure a head to yield an output with the same shape as the target input y.

+
+

Single step

+

Training:

+
from tsai.basics import *
+
+ts = get_forecasting_time_series("Sunspots").values
+X, y = SlidingWindow(60, horizon=1)(ts)
+splits = TimeSplitter(235)(y) 
+tfms = [None, TSForecasting()]
+batch_tfms = TSStandardize()
+fcst = TSForecaster(X, y, splits=splits, path='models', tfms=tfms, batch_tfms=batch_tfms, bs=512, arch="TSTPlus", metrics=mae, cbs=ShowGraph())
+fcst.fit_one_cycle(50, 1e-3)
+fcst.export("fcst.pkl")
+

Inference:

+
from tsai.inference import load_learner
+
+fcst = load_learner("models/fcst.pkl", cpu=False)
+raw_preds, target, preds = fcst.get_X_preds(X[splits[1]], y[splits[1]])
+raw_preds.shape
+# torch.Size([235, 1])
+
+
+

Multi-step

+

This example shows how to build a 3-step ahead univariate forecast.

+

Training:

+
from tsai.basics import *
+
+ts = get_forecasting_time_series("Sunspots").values
+X, y = SlidingWindow(60, horizon=3)(ts)
+splits = TimeSplitter(235, fcst_horizon=3)(y) 
+tfms = [None, TSForecasting()]
+batch_tfms = TSStandardize()
+fcst = TSForecaster(X, y, splits=splits, path='models', tfms=tfms, batch_tfms=batch_tfms, bs=512, arch="TSTPlus", metrics=mae, cbs=ShowGraph())
+fcst.fit_one_cycle(50, 1e-3)
+fcst.export("fcst.pkl")
+

Inference:

+
from tsai.inference import load_learner
+fcst = load_learner("models/fcst.pkl", cpu=False)
+raw_preds, target, preds = fcst.get_X_preds(X[splits[1]], y[splits[1]])
+raw_preds.shape
+# torch.Size([235, 3])
+
+
+
+
+

Input data format

+

The input format for all time series models and image models in tsai is the same. An np.ndarray (or array-like object like zarr, etc) with 3 dimensions:

+

[# samples x # variables x sequence length]

+
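For example, a batch of 100 samples with 3 variables (channels) and a sequence length of 60 would be created like this (illustrative only):

import numpy as np
+
+X = np.random.rand(100, 3, 60).astype('float32')
+print(X.shape)  # (100, 3, 60) -> [# samples x # variables x sequence length]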

The input format for tabular models in tsai (like TabModel, TabTransformer and TabFusionTransformer) is a pandas dataframe. See example.

+
+
+

How to contribute to tsai?

+

We welcome contributions of all kinds. Development of enhancements, bug fixes, documentation, tutorial notebooks, …

+

We have created a guide to help you start contributing to tsai. You can read it here.

+
+
+

Enterprise support and consulting services:

+

Want to make the most out of timeseriesAI/tsai in a professional setting? Let us help. Send us an email to learn more: info@timeseriesai.co

+
+
+

Citing tsai

+

If you use tsai in your research please use the following BibTeX entry:

+
@Misc{tsai,
+    author =       {Ignacio Oguiza},
+    title =        {tsai - A state-of-the-art deep learning library for time series and sequential data},
+    howpublished = {Github},
+    year =         {2023},
+    url =          {https://github.com/timeseriesAI/tsai}
+}
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/inference.html b/inference.html new file mode 100644 index 000000000..6aa86b4ae --- /dev/null +++ b/inference.html @@ -0,0 +1,1569 @@ + + + + + + + + + +tsai - Inference + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Inference

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Code required for inference.

+
+
+

source

+
+

Learner.get_X_preds

+
+
 Learner.get_X_preds (X, y=None, bs=64, with_input=False,
+                      with_decoded=True, with_loss=False, act=None)
+
              Type      Default  Details
X
y             NoneType  None
bs            int       64
with_input    bool      False    returns the input as well
with_decoded  bool      True     returns decoded predictions as well
with_loss     bool      False    returns the loss per item as well
act           NoneType  None     Apply activation to predictions, defaults to self.loss_func’s activation
+

Get the predictions and targets, optionally with_input and with_loss.

+

with_decoded will also return the decoded predictions (it reverses the transforms applied).

+

The order of the output is the following:

+
    +
  • input (optional): if with_input is True
  • +
  • probabilities (for classification) or predictions (for regression)
  • +
  • target: if y is provided. Otherwise None.
  • +
  • predictions: predicted labels. Predictions will be decoded if with_decoded=True.
  • +
  • loss (optional): if with_loss is set to True and y is not None.
  • +
+
+
from tsai.data.external import get_UCR_data
+
+
+
dsid = 'OliveOil'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+X_test = X[splits[1]]
+y_test = y[splits[1]]
+
+
+
learn = load_learner("./models/test.pth")
+
+

⚠️ Warning: load_learner (from fastai) requires all your custom code be in the exact same place as when exporting your Learner (the main script, or the module you imported it from).

+
+
test_probas, test_targets, test_preds = learn.get_X_preds(X_test, with_decoded=True)
+test_probas, test_targets, test_preds
+
+ + +
+
+ +
+
+
(tensor([[0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2639],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2641],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2421, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640]]),
+ None,
+ array(['4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',
+        '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',
+        '4', '4', '4', '4'], dtype='<U1'))
+
+
+
+
import torch
+from fastcore.test import test_close
+
+
+
torch_test_probas, torch_test_targets, torch_test_preds = learn.get_X_preds(torch.from_numpy(X_test), with_decoded=True)
+torch_test_probas, torch_test_targets, torch_test_preds
+test_close(test_probas, torch_test_probas)
+
+ + +
+
+ +
+
+
+
test_probas2, test_targets2, test_preds2 = learn.get_X_preds(X_test, y_test, with_decoded=True)
+test_probas2, test_targets2, test_preds2
+
+ + +
+
+ +
+
+
(tensor([[0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2639],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2641],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2421, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640]]),
+ tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+         3, 3, 3, 3, 3, 3]),
+ array(['4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',
+        '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',
+        '4', '4', '4', '4'], dtype='<U1'))
+
+
+
+
test_probas3, test_targets3, test_preds3, test_losses3 = learn.get_X_preds(X_test, y_test, with_loss=True, with_decoded=True)
+test_probas3, test_targets3, test_preds3, test_losses3
+
+ + +
+
+ +
+
+
(tensor([[0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2639],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2641],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2421, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2421, 0.2364, 0.2641],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640],
+         [0.2574, 0.2422, 0.2364, 0.2640]]),
+ tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+         3, 3, 3, 3, 3, 3]),
+ array(['4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',
+        '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',
+        '4', '4', '4', '4'], dtype='<U1'),
+ TensorBase([1.3572, 1.3572, 1.3572, 1.3571, 1.3572, 1.4181, 1.4181, 1.4181,
+             1.4181, 1.4181, 1.4181, 1.4181, 1.4181, 1.4181, 1.4423, 1.4422,
+             1.4422, 1.4422, 1.3316, 1.3316, 1.3316, 1.3316, 1.3316, 1.3316,
+             1.3316, 1.3316, 1.3316, 1.3316, 1.3317, 1.3317]))
+
+
+
+
from fastcore.test import test_eq
+
+
+
test_eq(test_probas, test_probas2)
+test_eq(test_preds, test_preds2)
+test_eq(test_probas, test_probas3)
+test_eq(test_preds, test_preds3)
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/learner.html b/learner.html new file mode 100644 index 000000000..b26b08027 --- /dev/null +++ b/learner.html @@ -0,0 +1,1761 @@ + + + + + + + + + +tsai - Learner + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Learner

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

fastai Learner extensions.

+
+
+

source

+
+

Learner.show_batch

+
+
 Learner.show_batch (**kwargs)
+
+
+

source

+
+
+

Learner.remove_all_cbs

+
+
 Learner.remove_all_cbs (max_iters=10)
+
+
+

source

+
+
+

Learner.one_batch

+
+
 Learner.one_batch (i, b)
+
+
+

source

+
+
+

Learner.inverse_transform

+
+
 Learner.inverse_transform (df:pandas.core.frame.DataFrame)
+
+

Applies sklearn-type pipeline inverse transforms

+
+

source

+
+
+

Learner.transform

+
+
 Learner.transform (df:pandas.core.frame.DataFrame)
+
+

Applies sklearn-type pipeline transforms

+
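A hypothetical sketch of how these two methods are meant to be used together; it assumes learn was trained with sklearn-type preprocessing pipelines and that new_df is a dataframe with the same columns used during training (both names are illustrative).

transformed_df = learn.transform(new_df)                   # apply the fitted pipeline transforms
+recovered_df = learn.inverse_transform(transformed_df)     # reverse them to recover the original values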

⚠️ Important: save_all and load_all methods are designed for small datasets only. If you are using a larger dataset, you should use the standard save and load_learner methods.

+
+

source

+
+
+

load_all

+
+
 load_all (path='export', dls_fname='dls', model_fname='model',
+           learner_fname='learner', device=None, pickle_module=<module
+           'pickle' from '/opt/hostedtoolcache/Python/3.9.18/x64/lib/pytho
+           n3.9/pickle.py'>, verbose=False)
+
+
+

source

+
+
+

Learner.save_all

+
+
 Learner.save_all (path='export', dls_fname='dls', model_fname='model',
+                   learner_fname='learner', verbose=False)
+
+
+
from tsai.data.core import get_ts_dls
+from tsai.utils import remove_dir
+
+
+
X = np.random.rand(100, 2, 10)
+dls = get_ts_dls(X)
+learn = Learner(dls, InceptionTimePlus(2, 1), loss_func=MSELossFlat())
+learn.save_all(Path.home()/'tmp', verbose=True)
+learn2 = load_all(Path.home()/'tmp', verbose=True)
+remove_dir(Path.home()/'tmp')
+
+
Learner saved:
+path          = '/Users/nacho/tmp'
+dls_fname     = '['dls_0.pth', 'dls_1.pth']'
+model_fname   = 'model.pth'
+learner_fname = 'learner.pkl'
+Learner loaded:
+path          = '/Users/nacho/tmp'
+dls_fname     = '['dls_0.pth', 'dls_1.pth']'
+model_fname   = 'model.pth'
+learner_fname = 'learner.pkl'
+/Users/nacho/tmp directory removed.
+
+
+
+

source

+
+
+

Learner.plot_metrics

+
+
 Learner.plot_metrics (nrows:int=1, ncols:int=1, figsize:tuple=None,
+                       imsize:int=3, suptitle:str=None, sharex:Union[bool,
+                       Literal['none','all','row','col']]=False, sharey:Un
+                       ion[bool,Literal['none','all','row','col']]=False,
+                       squeeze:bool=True,
+                       width_ratios:Optional[Sequence[float]]=None,
+                       height_ratios:Optional[Sequence[float]]=None,
+                       subplot_kw:Optional[dict[str,Any]]=None,
+                       gridspec_kw:Optional[dict[str,Any]]=None)
+
                Type                                         Default  Details
nrows           int                                          1        Number of rows in returned axes grid
ncols           int                                          1        Number of columns in returned axes grid
figsize         tuple                                        None     Width, height in inches of the returned figure
imsize          int                                          3        Size (in inches) of images that will be displayed in the returned figure
suptitle        str                                          None     Title to be set to returned figure
sharex          bool | Literal[‘none’, ‘all’, ‘row’, ‘col’]  False
sharey          bool | Literal[‘none’, ‘all’, ‘row’, ‘col’]  False
squeeze         bool                                         True
width_ratios    Sequence[float] | None                       None
height_ratios   Sequence[float] | None                       None
subplot_kw      dict[str, Any] | None                        None
gridspec_kw     dict[str, Any] | None                        None
Returns         (plt.Figure, plt.Axes)                                Returns both fig and ax as a tuple
+
+

source

+
+
+

Recorder.plot_metrics

+
+
 Recorder.plot_metrics (nrows=None, ncols=None, figsize=None,
+                        final_losses=True, perc=0.5, imsize:int=3,
+                        suptitle:str=None, sharex:Union[bool,Literal['none
+                        ','all','row','col']]=False, sharey:Union[bool,Lit
+                        eral['none','all','row','col']]=False,
+                        squeeze:bool=True,
+                        width_ratios:Optional[Sequence[float]]=None,
+                        height_ratios:Optional[Sequence[float]]=None,
+                        subplot_kw:Optional[dict[str,Any]]=None,
+                        gridspec_kw:Optional[dict[str,Any]]=None)
+
                Type                                         Default  Details
nrows           int                                          1        Number of rows in returned axes grid
ncols           int                                          1        Number of columns in returned axes grid
figsize         tuple                                        None     Width, height in inches of the returned figure
final_losses    bool                                         True
perc            float                                        0.5
imsize          int                                          3        Size (in inches) of images that will be displayed in the returned figure
suptitle        str                                          None     Title to be set to returned figure
sharex          bool | Literal[‘none’, ‘all’, ‘row’, ‘col’]  False
sharey          bool | Literal[‘none’, ‘all’, ‘row’, ‘col’]  False
squeeze         bool                                         True
width_ratios    Sequence[float] | None                       None
height_ratios   Sequence[float] | None                       None
subplot_kw      dict[str, Any] | None                        None
gridspec_kw     dict[str, Any] | None                        None
Returns         (plt.Figure, plt.Axes)                                Returns both fig and ax as a tuple
+
+

source

+
+
+

get_arch

+
+
 get_arch (arch_name)
+
+
+
for arch_name in all_arch_names:
+    get_arch(arch_name)
+
+
+

source

+
+
+

ts_learner

+
+
 ts_learner (dls, arch=None, c_in=None, c_out=None, seq_len=None, d=None,
+             s_cat_idxs=None, s_cat_embeddings=None,
+             s_cat_embedding_dims=None, s_cont_idxs=None, o_cat_idxs=None,
+             o_cat_embeddings=None, o_cat_embedding_dims=None,
+             o_cont_idxs=None, splitter=<function trainable_params>,
+             loss_func=None, opt_func=<function Adam>, lr=0.001, cbs=None,
+             metrics=None, path=None, model_dir='models', wd=None,
+             wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85, 0.95),
+             train_metrics=False, valid_metrics=True, seed=None,
+             device=None, verbose=False, patch_len=None,
+             patch_stride=None, fusion_layers=128, fusion_act='relu',
+             fusion_dropout=0.0, fusion_use_bn=True, pretrained=False,
+             weights_path=None, exclude_head=True, cut=-1, init=None,
+             arch_config={})
+
+
+

source

+
+
+

tsimage_learner

+
+
 tsimage_learner (dls, arch=None, pretrained=False, loss_func=None,
+                  opt_func=<function Adam>, lr=0.001, cbs=None,
+                  metrics=None, path=None, model_dir='models', wd=None,
+                  wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85,
+                  0.95), c_in=None, c_out=None, device=None,
+                  verbose=False, init=None, arch_config={})
+
+
+

source

+
+
+

Learner.decoder

+
+
 Learner.decoder (o)
+
+
+
from tsai.data.core import *
+from tsai.data.external import get_UCR_data
+from tsai.models.FCNPlus import FCNPlus
+
+
+
X, y, splits = get_UCR_data('OliveOil', verbose=True, split_data=False)
+tfms  = [None, [TSCategorize()]]
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
+learn = ts_learner(dls, FCNPlus)
+for p in learn.model.parameters():
+    p.requires_grad=False
+test_eq(count_parameters(learn.model), 0)
+learn.freeze()
+test_eq(count_parameters(learn.model), 1540)
+learn.unfreeze()
+test_eq(count_parameters(learn.model), 264580)
+
+learn = ts_learner(dls, 'FCNPlus')
+for p in learn.model.parameters():
+    p.requires_grad=False
+test_eq(count_parameters(learn.model), 0)
+learn.freeze()
+test_eq(count_parameters(learn.model), 1540)
+learn.unfreeze()
+test_eq(count_parameters(learn.model), 264580)
+
+
Dataset: OliveOil
+X      : (60, 1, 570)
+y      : (60,)
+splits : (#30) [0,1,2,3,4,5,6,7,8,9...] (#30) [30,31,32,33,34,35,36,37,38,39...] 
+
+
+
+
+
learn.show_batch();
+
+
+
+

+
+
+
+
+
+
from fastai.metrics import accuracy
+from tsai.data.preprocessing import TSRobustScale
+
+
+
X, y, splits = get_UCR_data('OliveOil', split_data=False)
+tfms  = [None, TSClassification()]
+batch_tfms = TSRobustScale()
+dls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms)
+learn = ts_learner(dls, FCNPlus, metrics=accuracy, train_metrics=True)
+learn.fit_one_cycle(2)
+learn.plot_metrics()
+
+ + +
+
epoch  train_loss  train_accuracy  valid_loss  valid_accuracy  time
0      1.480875    0.266667        1.390461    0.300000        00:02
1      1.476655    0.266667        1.387370    0.300000        00:01
+
+
+
+
+

+
+
+
+
+
+
if not os.path.exists("./models"): os.mkdir("./models")
+if not os.path.exists("./data"): os.mkdir("./data")
+np.save("data/X_test.npy", X[splits[1]])
+np.save("data/y_test.npy", y[splits[1]])
+learn.export("./models/test.pth")
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/losses.html b/losses.html new file mode 100644 index 000000000..947f3c21c --- /dev/null +++ b/losses.html @@ -0,0 +1,1387 @@ + + + + + + + + + +tsai - Losses + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Losses

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Losses not available in fastai or Pytorch.

+
+
+

source

+
+

HuberLoss

+
+
 HuberLoss (reduction='mean', delta=1.0)
+
+

Huber loss

+

Creates a criterion that uses a squared term if the absolute element-wise error falls below delta and a delta-scaled L1 term otherwise. This loss combines advantages of both :class:L1Loss and :class:MSELoss; the delta-scaled L1 region makes the loss less sensitive to outliers than :class:MSELoss, while the L2 region provides smoothness over :class:L1Loss near 0. See Huber loss <https://en.wikipedia.org/wiki/Huber_loss>_ for more information. This loss is equivalent to nn.SmoothL1Loss when delta == 1.

+
+

source

+
+
+

LogCoshLoss

+
+
 LogCoshLoss (reduction='mean', delta=1.0)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+
inp = torch.rand(8, 3, 10)
+targ = torch.randn(8, 3, 10)
+test_close(HuberLoss(delta=1)(inp, targ), nn.SmoothL1Loss()(inp, targ))
+LogCoshLoss()(inp, targ)
+
+
tensor(0.4588)
+
+
+
+

source

+
+
+

MaskedLossWrapper

+
+
 MaskedLossWrapper (crit)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
inp = torch.rand(8, 3, 10)
+targ = torch.randn(8, 3, 10)
+targ[targ >.8] = np.nan
+nn.L1Loss()(inp, targ), MaskedLossWrapper(nn.L1Loss())(inp, targ)
+
+
(tensor(nan), tensor(1.0520))
+
+
+
+

source

+
+
+

CenterPlusLoss

+
+
 CenterPlusLoss (loss, c_out, λ=0.01, logits_dim=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

CenterLoss

+
+
 CenterLoss (c_out, logits_dim=None)
+
+

Code in Pytorch has been slightly modified from: https://github.com/KaiyangZhou/pytorch-center-loss/blob/master/center_loss.py Based on paper: Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.

+

Args: c_out (int): number of classes. logits_dim (int): dim 1 of the logits. By default same as c_out (for one hot encoded logits)

+
+
c_in = 10
+x = torch.rand(64, c_in).to(device=default_device())
+x = F.softmax(x, dim=1)
+label = x.max(dim=1).indices
+CenterLoss(c_in).to(x.device)(x, label), CenterPlusLoss(LabelSmoothingCrossEntropyFlat(), c_in).to(x.device)(x, label)
+
+
(tensor(9.2481, grad_fn=<DivBackward0>),
+ TensorBase(2.3559, grad_fn=<AliasBackward0>))
+
+
+
+
CenterPlusLoss(LabelSmoothingCrossEntropyFlat(), c_in)
+
+
CenterPlusLoss(loss=FlattenedLoss of LabelSmoothingCrossEntropy(), c_out=10, λ=0.01)
+
+
+
+

source

+
+
+

FocalLoss

+
+
 FocalLoss (alpha:Optional[torch.Tensor]=None, gamma:float=2.0,
+            reduction:str='mean')
+
+

Weighted, multiclass focal loss

+
+
inputs = torch.normal(0, 2, (16, 2)).to(device=default_device())
+targets = torch.randint(0, 2, (16,)).to(device=default_device())
+FocalLoss()(inputs, targets)
+
+
tensor(0.9829)
+
+
+
+

source

+
+
+

TweedieLoss

+
+
 TweedieLoss (p=1.5, eps=1e-08)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
c_in = 10
+output = torch.rand(64).to(device=default_device())
+target = torch.rand(64).to(device=default_device())
+TweedieLoss().to(output.device)(output, target)
+
+
tensor(3.0539)
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/metrics.html b/metrics.html new file mode 100644 index 000000000..d6ac0dd29 --- /dev/null +++ b/metrics.html @@ -0,0 +1,1375 @@ + + + + + + + + + +tsai - Metrics + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Metrics

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Metrics not included in fastai.

+
+
+

source

+
+

MatthewsCorrCoefBinary

+
+
 MatthewsCorrCoefBinary (sample_weight=None)
+
+

Matthews correlation coefficient for single-label classification problems

+
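A hypothetical sketch (not an executed doc cell): like other fastai-style metrics, it would typically be passed to a learner built on a binary, single-label classification task (dls is assumed to hold such a task, and accuracy to be already imported).

learn = ts_learner(dls, metrics=[accuracy, MatthewsCorrCoefBinary()])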
+

source

+
+
+

get_task_metrics

+
+
 get_task_metrics (dls, binary_metrics=None, multi_class_metrics=None,
+                   regression_metrics=None, verbose=True)
+
+

All metrics applicable to multi classification have been created by Doug Williams (https://github.com/williamsdoug). Thanks a lot Doug!!

+
+

source

+
+
+

F1_multi

+
+
 F1_multi (*args, **kwargs)
+
+
+

source

+
+
+

Fbeta_multi

+
+
 Fbeta_multi (inp, targ, beta=1.0, thresh=0.5, sigmoid=True)
+
+

Computes Fbeta when inp and targ are the same size.

+
+

source

+
+
+

balanced_accuracy_multi

+
+
 balanced_accuracy_multi (inp, targ, thresh=0.5, sigmoid=True)
+
+

Computes balanced accuracy when inp and targ are the same size.

+
+

source

+
+
+

specificity_multi

+
+
 specificity_multi (inp, targ, thresh=0.5, sigmoid=True)
+
+

Computes specificity (true negative rate) when inp and targ are the same size.

+
+

source

+
+
+

recall_multi

+
+
 recall_multi (inp, targ, thresh=0.5, sigmoid=True)
+
+

Computes recall when inp and targ are the same size.

+
+

source

+
+
+

precision_multi

+
+
 precision_multi (inp, targ, thresh=0.5, sigmoid=True)
+
+

Computes precision when inp and targ are the same size.

+
+

source

+
+
+

metrics_multi_common

+
+
 metrics_multi_common (inp, targ, thresh=0.5, sigmoid=True,
+                       by_sample=False)
+
+

Computes TP, TN, FP, FN when inp and targ are the same size.

+
+

source

+
+
+

accuracy_multi

+
+
 accuracy_multi (inp, targ, thresh=0.5, sigmoid=True, by_sample=False)
+
+

Computes accuracy when inp and targ are the same size.

+
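A minimal illustrative example (not an executed doc cell) with multi-label logits and binary targets of the same size:

import torch
+
+inp = torch.randn(16, 5)                     # raw logits for 5 labels
+targ = torch.randint(0, 2, (16, 5)).float()  # binary targets, same shape as inp
+print(accuracy_multi(inp, targ, thresh=0.5, sigmoid=True))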
+

source

+
+
+

mae

+
+
 mae (inp, targ)
+
+

Mean absolute error between inp and targ.

+
+

source

+
+
+

mape

+
+
 mape (inp, targ)
+
+

Mean absolute percentage error between inp and targ.

+
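A minimal illustrative example (not an executed doc cell) comparing both metrics on random tensors:

import torch
+
+inp = torch.rand(64)
+targ = torch.rand(64) + 0.1  # keep targets away from 0 so mape stays well behaved
+print(mae(inp, targ), mape(inp, targ))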
+
n_classes = 4
+inp = torch.normal(0, 1, (16, 20, n_classes))
+targ = torch.randint(0, n_classes, (16, 20)).to(torch.int8)
+_mAP(inp, targ)
+
+
0.27493315845795063
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.convtranplus.html b/models.convtranplus.html new file mode 100644 index 000000000..e67f41aee --- /dev/null +++ b/models.convtranplus.html @@ -0,0 +1,1853 @@ + + + + + + + + + +tsai - ConvTranPlus + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

ConvTranPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

ConvTran: Improving Position Encoding of Transformers for Multivariate Time Series Classification

+
+

This is a Pytorch implementation of ConvTran adapted by Ignacio Oguiza and based on:

+

Foumani, N. M., Tan, C. W., Webb, G. I., & Salehi, M. (2023). Improving Position Encoding of Transformers for Multivariate Time Series Classification. arXiv preprint arXiv:2305.16642.

+

Pre-print: https://arxiv.org/abs/2305.16642v1

+

Original repository: https://github.com/Navidfoumani/ConvTran

+
+

source

+
+

tAPE

+
+
 tAPE (d_model:int, seq_len=1024, dropout:float=0.1, scale_factor=1.0)
+
+

time Absolute Position Encoding

              Type   Default  Details
d_model       int             the embedding dimension
seq_len       int    1024     the max. length of the incoming sequence
dropout       float  0.1      dropout value
scale_factor  float  1.0
+
+
t = torch.randn(8, 50, 128)
+assert tAPE(128, 50)(t).shape == t.shape
+
+
+

source

+
+
+

AbsolutePositionalEncoding

+
+
 AbsolutePositionalEncoding (d_model:int, seq_len=1024, dropout:float=0.1,
+                             scale_factor=1.0)
+
+

Absolute positional encoding

              Type   Default  Details
d_model       int             the embedding dimension
seq_len       int    1024     the max. length of the incoming sequence
dropout       float  0.1      dropout value
scale_factor  float  1.0
+
+
t = torch.randn(8, 50, 128)
+assert AbsolutePositionalEncoding(128, 50)(t).shape == t.shape
+
+
+

source

+
+
+

LearnablePositionalEncoding

+
+
 LearnablePositionalEncoding (d_model:int, seq_len=1024,
+                              dropout:float=0.1)
+
+

Learnable positional encoding

         Type   Default  Details
d_model  int             the embedding dimension
seq_len  int    1024     the max. length of the incoming sequence
dropout  float  0.1      dropout value
+
+
t = torch.randn(8, 50, 128)
+assert LearnablePositionalEncoding(128, 50)(t).shape == t.shape
+
+
+

source

+
+
+

Attention

+
+
 Attention (d_model:int, n_heads:int=8, dropout:float=0.01)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

         Type   Default  Details
d_model  int             Embedding dimension
n_heads  int    8        number of attention heads
dropout  float  0.01     dropout
+
+
t = torch.randn(8, 50, 128)
+assert Attention(128)(t).shape == t.shape
+
+
+

source

+
+
+

Attention_Rel_Scl

+
+
 Attention_Rel_Scl (d_model:int, seq_len:int, n_heads:int=8,
+                    dropout:float=0.01)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

         Type   Default  Details
d_model  int             Embedding dimension
seq_len  int             sequence length
n_heads  int    8        number of attention heads
dropout  float  0.01     dropout
+
+
t = torch.randn(8, 50, 128)
+assert Attention_Rel_Scl(128, 50)(t).shape == t.shape
+
+
+

source

+
+
+

Attention_Rel_Vec

+
+
 Attention_Rel_Vec (d_model:int, seq_len:int, n_heads:int=8,
+                    dropout:float=0.01)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

         Type   Default  Details
d_model  int             Embedding dimension
seq_len  int             sequence length
n_heads  int    8        number of attention heads
dropout  float  0.01     dropout
+
+
t = torch.randn(8, 50, 128)
+assert Attention_Rel_Vec(128, 50)(t).shape == t.shape
+
+
+

source

+
+
+

ConvTranBackbone

+
+
 ConvTranBackbone (c_in:int, seq_len:int, d_model=16, n_heads:int=8,
+                   dim_ff:int=256, abs_pos_encode:str='tAPE',
+                   rel_pos_encode:str='eRPE', dropout:float=0.01)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

               | Type  | Default | Details
c_in           | int   |         |
seq_len        | int   |         |
d_model        | int   | 16      | Internal dimension of transformer embeddings
n_heads        | int   | 8       | Number of multi-headed attention heads
dim_ff         | int   | 256     | Dimension of dense feedforward part of transformer layer
abs_pos_encode | str   | tAPE    | Absolute Position Embedding. choices={'tAPE', 'sin', 'learned', None}
rel_pos_encode | str   | eRPE    | Relative Position Embedding. choices={'eRPE', 'vector', None}
dropout        | float | 0.01    | Dropout regularization ratio
+
+
t = torch.randn(8, 5, 20)
+assert ConvTranBackbone(5, 20)(t).shape == (8, 16, 20)
+
+
+
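The position-encoding arguments accept the choices listed in the table above. As a minimal sketch (not part of the original docs; it assumes the alternative encodings preserve the same (batch, d_model, seq_len) output layout as the defaults):

t = torch.randn(8, 5, 20)
# 'sin' and 'vector' are among the documented choices; d_model defaults to 16
out = ConvTranBackbone(5, 20, abs_pos_encode='sin', rel_pos_encode='vector')(t)
assert out.shape == (8, 16, 20)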

source

+
+
+

ConvTranPlus

+
+
 ConvTranPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,
+               d_model:int=16, n_heads:int=8, dim_ff:int=256,
+               abs_pos_encode:str='tAPE', rel_pos_encode:str='eRPE',
+               encoder_dropout:float=0.01, fc_dropout:float=0.1,
+               use_bn:bool=True, flatten:bool=True, custom_head:Any=None)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
                | Type       | Default | Details
c_in            | int        |         | Number of channels in input
c_out           | int        |         | Number of channels in output
seq_len         | int        |         | Input sequence length
d               | tuple      | None    | output shape (excluding batch dimension)
d_model         | int        | 16      | Internal dimension of transformer embeddings
n_heads         | int        | 8       | Number of multi-headed attention heads
dim_ff          | int        | 256     | Dimension of dense feedforward part of transformer layer
abs_pos_encode  | str        | tAPE    | Absolute Position Embedding. choices={'tAPE', 'sin', 'learned', None}
rel_pos_encode  | str        | eRPE    | Relative Position Embedding. choices={'eRPE', 'vector', None}
encoder_dropout | float      | 0.01    | Dropout regularization ratio for the encoder
fc_dropout      | float      | 0.1     | Dropout regularization ratio for the head
use_bn          | bool       | True    | indicates if batchnorm will be applied to the model head
flatten         | bool       | True    | flatten the output of the encoder before applying the head if True
custom_head     | typing.Any | None    | custom head that will be applied to the model head (optional)
+
+
xb = torch.randn(16, 5, 20)
+
+model = ConvTranPlus(5, 3, 20, d=None)
+output = model(xb)
+assert output.shape == (16, 3)
+
+
+
xb = torch.randn(16, 5, 20)
+
+model = ConvTranPlus(5, 3, 20, d=5)
+output = model(xb)
+assert output.shape == (16, 5, 3)
+
+
+
xb = torch.randn(16, 5, 20)
+
+model = ConvTranPlus(5, 3, 20, d=(2, 10))
+output = model(xb)
+assert output.shape == (16, 2, 10, 3)
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.explainability.html b/models.explainability.html new file mode 100644 index 000000000..d21e81e55 --- /dev/null +++ b/models.explainability.html @@ -0,0 +1,1231 @@ + + + + + + + + + +tsai - Explainability + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Explainability

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Functionality to help with both global and local explainability.

+
+
+

source

+
+

get_attribution_map

+
+
 get_attribution_map (model, modules, x, y=None, detach=True, cpu=False,
+                      apply_relu=True)
+
+
+

source

+
+
+

get_acts_and_grads

+
+
 get_acts_and_grads (model, modules, x, y=None, detach=True, cpu=False)
+
+

Returns activations and gradients for the given modules in a model, for a single input or a batch. Gradients require y value(s); if none are provided, the model's predictions are used instead.

+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.fcn.html b/models.fcn.html new file mode 100644 index 000000000..507b41397 --- /dev/null +++ b/models.fcn.html @@ -0,0 +1,1293 @@ + + + + + + + + + +tsai - FCN + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

FCN

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:

+ +

Official FCN TensorFlow implementation: https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/fcn.py.

+

Note: the kernel filter size of 8 has been replaced by 7 (since we believe the original value was a bug).

+
+

source

+
+

FCN

+
+
 FCN (c_in, c_out, layers=[128, 256, 128], kss=[7, 5, 3])
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 16
+nvars = 3
+seq_len = 128
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+model = FCN(nvars, c_out)
+test_eq(model(xb).shape, (bs, c_out))
+model
+
+
FCN(
+  (convblock1): ConvBlock(
+    (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
+    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (convblock2): ConvBlock(
+    (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
+    (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (convblock3): ConvBlock(
+    (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
+    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (gap): GAP1d(
+    (gap): AdaptiveAvgPool1d(output_size=1)
+    (flatten): Flatten(full=False)
+  )
+  (fc): Linear(in_features=128, out_features=2, bias=True)
+)
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.fcnplus.html b/models.fcnplus.html new file mode 100644 index 000000000..10ba357f2 --- /dev/null +++ b/models.fcnplus.html @@ -0,0 +1,1340 @@ + + + + + + + + + +tsai - FCNPlus + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

FCNPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+

source

+
+

FCNPlus

+
+
 FCNPlus (c_in, c_out, layers=[128, 256, 128], kss=[7, 5, 3], coord=False,
+          separable=False, use_bn=False, fc_dropout=0.0, zero_norm=False,
+          act=<class 'torch.nn.modules.activation.ReLU'>, act_kwargs={},
+          residual=False, custom_head=None)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
xb = torch.rand(16, 3, 10)
+test_eq(FCNPlus(3, 2)(xb).shape, [xb.shape[0], 2])
+test_eq(FCNPlus(3, 2, coord=True, separable=True, act=Swish, residual=True)(xb).shape, [xb.shape[0], 2])
+test_eq(nn.Sequential(*FCNPlus(3, 2).children())(xb).shape, [xb.shape[0], 2])
+test_eq(FCNPlus(3, 2, custom_head=partial(mlp_head, seq_len=10))(xb).shape, [xb.shape[0], 2])
+
+
+
from tsai.models.utils import *
+
+
+
model = build_ts_model(FCNPlus, 2, 3)
+model[-1]
+
+
Sequential(
+  (0): AdaptiveAvgPool1d(output_size=1)
+  (1): Squeeze(dim=-1)
+  (2): Linear(in_features=128, out_features=3, bias=True)
+)
+
+
+
+
from tsai.models.FCN import *
+
+
+
test_eq(count_parameters(FCN(3,2)), count_parameters(FCNPlus(3,2)))
+
+
+
FCNPlus(3,2)
+
+
FCNPlus(
+  (backbone): _FCNBlockPlus(
+    (convblock1): ConvBlock(
+      (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (convblock2): ConvBlock(
+      (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
+      (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (convblock3): ConvBlock(
+      (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (add): Sequential()
+  )
+  (head): Sequential(
+    (0): AdaptiveAvgPool1d(output_size=1)
+    (1): Squeeze(dim=-1)
+    (2): Linear(in_features=128, out_features=2, bias=True)
+  )
+)
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.gatedtabtransformer.html b/models.gatedtabtransformer.html new file mode 100644 index 000000000..fb919c79c --- /dev/null +++ b/models.gatedtabtransformer.html @@ -0,0 +1,1302 @@ + + + + + + + + + +tsai - GatedTabTransformer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

GatedTabTransformer

+
+ + + +
+ + + + +
+ + + +
+ + + +

This implementation is based on:

+ +

Official repo: https://github.com/radi-cho/GatedTabTransformer

+
+

source

+
+

GatedTabTransformer

+
+
 GatedTabTransformer (classes, cont_names, c_out, column_embed=True,
+                      add_shared_embed=False, shared_embed_div=8,
+                      embed_dropout=0.1, drop_whole_embed=False,
+                      d_model=32, n_layers=6, n_heads=8, d_k=None,
+                      d_v=None, d_ff=None, res_attention=True,
+                      attention_act='gelu', res_dropout=0.1,
+                      norm_cont=True, mlp_d_model=32, mlp_d_ffn=64,
+                      mlp_layers=4)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+
from fastcore.test import test_eq
+from fastcore.basics import first
+from fastai.data.external import untar_data, URLs
+from fastai.tabular.data import TabularDataLoaders
+from fastai.tabular.core import Categorify, FillMissing
+from fastai.data.transforms import Normalize
+import pandas as pd
+
+
+
path = untar_data(URLs.ADULT_SAMPLE)
+df = pd.read_csv(path/'adult.csv')
+dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names="salary",
+    cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'],
+    cont_names = ['age', 'fnlwgt', 'education-num'],
+    procs = [Categorify, FillMissing, Normalize])
+x_cat, x_cont, yb = first(dls.train)
+model = GatedTabTransformer(dls.classes, dls.cont_names, dls.c)
+test_eq(model(x_cat, x_cont).shape, (dls.train.bs, dls.c))
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.gmlp.html b/models.gmlp.html new file mode 100644 index 000000000..ed7eb3a73 --- /dev/null +++ b/models.gmlp.html @@ -0,0 +1,1285 @@ + + + + + + + + + +tsai - gMLP + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

gMLP

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial PyTorch implementation based on:

+ +
+

source

+
+

gMLP

+
+
 gMLP (c_in, c_out, seq_len, patch_size=1, d_model=256, d_ffn=512,
+       depth=6)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+
bs = 16
+c_in = 3
+c_out = 2
+seq_len = 64
+patch_size = 4
+xb = torch.rand(bs, c_in, seq_len)
+model = gMLP(c_in, c_out, seq_len, patch_size=patch_size)
+test_eq(model(xb).shape, (bs, c_out))
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.hydramultirocketplus.html b/models.hydramultirocketplus.html new file mode 100644 index 000000000..5b96b7f0f --- /dev/null +++ b/models.hydramultirocketplus.html @@ -0,0 +1,1492 @@ + + + + + + + + + +tsai - HydraMultiRocketPlus + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

HydraMultiRocketPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Hydra: competing convolutional kernels for fast and accurate time series classification.

+
+

This is a Pytorch implementation of Hydra-MultiRocket adapted by Ignacio Oguiza and based on:

+

Dempster, A., Schmidt, D. F., & Webb, G. I. (2023). Hydra: Competing convolutional kernels for fast and accurate time series classification. Data Mining and Knowledge Discovery, 1-27.

+

Original paper: https://link.springer.com/article/10.1007/s10618-023-00939-3

+

Original repository: https://github.com/angus924/hydra

+
+

source

+
+

HydraMultiRocketBackbonePlus

+
+
 HydraMultiRocketBackbonePlus (c_in, c_out, seq_len, d=None, k=8, g=64,
+                               max_c_in=8, clip=True, num_features=50000,
+                               max_dilations_per_kernel=32, kernel_size=9,
+                               max_num_channels=None, max_num_kernels=84,
+                               use_bn=True, fc_dropout=0,
+                               custom_head=None, zero_init=True,
+                               use_diff=True, device=device(type='cpu'))
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+

source

+
+
+

HydraMultiRocketPlus

+
+
 HydraMultiRocketPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,
+                       k:int=8, g:int=64, max_c_in:int=8, clip:bool=True,
+                       num_features:int=50000,
+                       max_dilations_per_kernel:int=32, kernel_size:int=9,
+                       max_num_channels:int=None, max_num_kernels:int=84,
+                       use_bn:bool=True, fc_dropout:float=0.0,
+                       custom_head:Any=None, zero_init:bool=True,
+                       use_diff:bool=True, device:str=device(type='cpu'))
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
                         | Type       | Default | Details
c_in                     | int        |         | num of channels in input
c_out                    | int        |         | num of channels in output
seq_len                  | int        |         | sequence length
d                        | tuple      | None    | shape of the output (when ndim > 1)
k                        | int        | 8       | number of kernels per group in HydraBackbone
g                        | int        | 64      | number of groups in HydraBackbone
max_c_in                 | int        | 8       | max number of channels per group in HydraBackbone
clip                     | bool       | True    | clip values >= 0 in HydraBackbone
num_features             | int        | 50000   | number of MultiRocket features
max_dilations_per_kernel | int        | 32      | max dilations per kernel in MultiRocket
kernel_size              | int        | 9       | kernel size in MultiRocket
max_num_channels         | int        | None    | max number of channels in MultiRocket
max_num_kernels          | int        | 84      | max number of kernels in MultiRocket
use_bn                   | bool       | True    | use batch norm
fc_dropout               | float      | 0.0     | dropout probability
custom_head              | typing.Any | None    | optional custom head as a torch.nn.Module or Callable
zero_init                | bool       | True    | set head weights and biases to zero
use_diff                 | bool       | True    | use diff(X) as input
device                   | str        | cpu     | device to use
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 20)).to(default_device())
+
+model = HydraMultiRocketPlus(5, 3, 20, d=None).to(default_device())
+output = model(xb)
+assert output.shape == (16, 3)
+output.shape
+
+
torch.Size([16, 3])
+
+
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 20)).to(default_device())
+
+model = HydraMultiRocketPlus(5, 3, 20, d=None, use_diff=False).to(default_device())
+output = model(xb)
+assert output.shape == (16, 3)
+output.shape
+
+
torch.Size([16, 3])
+
+
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 5, 20)).to(default_device())
+
+model = HydraMultiRocketPlus(5, 3, 20, d=20, use_diff=True).to(default_device())
+output = model(xb)
+assert output.shape == (16, 20, 3)
+output.shape
+
+
torch.Size([16, 20, 3])
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.hydraplus.html b/models.hydraplus.html new file mode 100644 index 000000000..e13033c4e --- /dev/null +++ b/models.hydraplus.html @@ -0,0 +1,1454 @@ + + + + + + + + + +tsai - HydraPlus + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

HydraPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Hydra: competing convolutional kernels for fast and accurate time series classification.

+
+

This is a Pytorch implementation of Hydra adapted by Ignacio Oguiza and based on:

+

Dempster, A., Schmidt, D. F., & Webb, G. I. (2023). Hydra: Competing convolutional kernels for fast and accurate time series classification. Data Mining and Knowledge Discovery, 1-27.

+

Original paper: https://link.springer.com/article/10.1007/s10618-023-00939-3

+

Original repository: https://github.com/angus924/hydra

+
+

source

+
+

HydraBackbonePlus

+
+
 HydraBackbonePlus (c_in, c_out, seq_len, k=8, g=64, max_c_in=8,
+                    clip=True, device=device(type='cpu'), zero_init=True)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+

source

+
+
+

HydraPlus

+
+
 HydraPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None, k:int=8,
+            g:int=64, max_c_in:int=8, clip:bool=True, use_bn:bool=True,
+            fc_dropout:float=0.0, custom_head:Any=None,
+            zero_init:bool=True, use_diff:bool=True,
+            device:str=device(type='cpu'))
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
            | Type       | Default | Details
c_in        | int        |         | num of channels in input
c_out       | int        |         | num of channels in output
seq_len     | int        |         | sequence length
d           | tuple      | None    | shape of the output (when ndim > 1)
k           | int        | 8       | number of kernels per group
g           | int        | 64      | number of groups
max_c_in    | int        | 8       | max number of channels per group
clip        | bool       | True    | clip values >= 0
use_bn      | bool       | True    | use batch norm
fc_dropout  | float      | 0.0     | dropout probability
custom_head | typing.Any | None    | optional custom head as a torch.nn.Module or Callable
zero_init   | bool       | True    | set head weights and biases to zero
use_diff    | bool       | True    | use diff(X) as input
device      | str        | cpu     | device to use
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 20)).to(default_device())
+
+model = HydraPlus(5, 3, 20, d=None).to(default_device())
+output = model(xb)
+assert output.shape == (16, 3)
+output.shape
+
+
torch.Size([16, 3])
+
+
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 20)).to(default_device())
+
+model = HydraPlus(5, 3, 20, d=None, use_diff=False).to(default_device())
+output = model(xb)
+assert output.shape == (16, 3)
+output.shape
+
+
torch.Size([16, 3])
+
+
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 5, 20)).to(default_device())
+
+model = HydraPlus(5, 3, 20, d=20, use_diff=True).to(default_device())
+output = model(xb)
+assert output.shape == (16, 20, 3)
+output.shape
+
+
torch.Size([16, 20, 3])
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.inceptiontime.html b/models.inceptiontime.html new file mode 100644 index 000000000..b6e9a269f --- /dev/null +++ b/models.inceptiontime.html @@ -0,0 +1,1409 @@ + + + + + + + + + +tsai - InceptionTime + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

InceptionTime

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

An ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture.

+
+

This is an unofficial PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:

+

Fawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J. & Petitjean, F. (2019). InceptionTime: Finding AlexNet for Time Series Classification. arXiv preprint arXiv:1909.04939.

+

Official InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime

+
+

source

+
+

InceptionTime

+
+
 InceptionTime (c_in, c_out, seq_len=None, nf=32, nb_filters=None, ks=40,
+                bottleneck=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

InceptionBlock

+
+
 InceptionBlock (ni, nf=32, residual=True, depth=6, ks=40,
+                 bottleneck=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

InceptionModule

+
+
 InceptionModule (ni, nf, ks=40, bottleneck=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
from tsai.models.utils import count_parameters
+
+
+
bs = 16
+vars = 1
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, vars, seq_len)
+test_eq(InceptionTime(vars,c_out)(xb).shape, [bs, c_out])
+test_eq(InceptionTime(vars,c_out, bottleneck=False)(xb).shape, [bs, c_out])
+test_eq(InceptionTime(vars,c_out, residual=False)(xb).shape, [bs, c_out])
+test_eq(count_parameters(InceptionTime(3, 2)), 455490)
+
+
+
InceptionTime(3,2)
+
+
InceptionTime(
+  (inceptionblock): InceptionBlock(
+    (inception): ModuleList(
+      (0): InceptionModule(
+        (bottleneck): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
+          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
+          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (act): ReLU()
+      )
+      (1): InceptionModule(
+        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
+          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
+          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (act): ReLU()
+      )
+      (2): InceptionModule(
+        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
+          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
+          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (act): ReLU()
+      )
+      (3): InceptionModule(
+        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
+          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
+          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (act): ReLU()
+      )
+      (4): InceptionModule(
+        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
+          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
+          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (act): ReLU()
+      )
+      (5): InceptionModule(
+        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
+          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
+          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (act): ReLU()
+      )
+    )
+    (shortcut): ModuleList(
+      (0): ConvBlock(
+        (0): Conv1d(3, 128, kernel_size=(1,), stride=(1,), bias=False)
+        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (add): Add
+    (act): ReLU()
+  )
+  (gap): GAP1d(
+    (gap): AdaptiveAvgPool1d(output_size=1)
+    (flatten): Flatten(full=False)
+  )
+  (fc): Linear(in_features=128, out_features=2, bias=True)
+)
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.inceptiontimeplus.html b/models.inceptiontimeplus.html new file mode 100644 index 000000000..aa18b8f21 --- /dev/null +++ b/models.inceptiontimeplus.html @@ -0,0 +1,1830 @@ + + + + + + + + + +tsai - InceptionTimePlus + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

InceptionTimePlus

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This is an unofficial PyTorch implementation of InceptionTime (Fawaz, 2019) created by Ignacio Oguiza.

+
+

References:
* Fawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J., … & Petitjean, F. (2020). Inceptiontime: Finding alexnet for time series classification. Data Mining and Knowledge Discovery, 34(6), 1936-1962.
* Official InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime

+
+

source

+
+

InceptionBlockPlus

+
+
 InceptionBlockPlus (ni, nf, residual=True, depth=6, coord=False,
+                     norm='Batch', zero_norm=False, act=<class
+                     'torch.nn.modules.activation.ReLU'>, act_kwargs={},
+                     sa=False, se=None, stoch_depth=1.0, ks=40,
+                     bottleneck=True, padding='same', separable=False,
+                     dilation=1, stride=1, conv_dropout=0.0, bn_1st=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

InceptionModulePlus

+
+
 InceptionModulePlus (ni, nf, ks=40, bottleneck=True, padding='same',
+                      coord=False, separable=False, dilation=1, stride=1,
+                      conv_dropout=0.0, sa=False, se=None, norm='Batch',
+                      zero_norm=False, bn_1st=True, act=<class
+                      'torch.nn.modules.activation.ReLU'>, act_kwargs={})
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

InceptionTimePlus

+
+
 InceptionTimePlus (c_in, c_out, seq_len=None, nf=32, nb_filters=None,
+                    flatten=False, concat_pool=False, fc_dropout=0.0,
+                    bn=False, y_range=None, custom_head=None, ks=40,
+                    bottleneck=True, padding='same', coord=False,
+                    separable=False, dilation=1, stride=1,
+                    conv_dropout=0.0, sa=False, se=None, norm='Batch',
+                    zero_norm=False, bn_1st=True, act=<class
+                    'torch.nn.modules.activation.ReLU'>, act_kwargs={})
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

XCoordTime

+
+
 XCoordTime (c_in, c_out, seq_len=None, nf=32, nb_filters=None,
+             flatten=False, concat_pool=False, fc_dropout=0.0, bn=False,
+             y_range=None, custom_head=None, ks=40, bottleneck=True,
+             padding='same', coord=False, separable=False, dilation=1,
+             stride=1, conv_dropout=0.0, sa=False, se=None, norm='Batch',
+             zero_norm=False, bn_1st=True, act=<class
+             'torch.nn.modules.activation.ReLU'>, act_kwargs={})
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

InCoordTime

+
+
 InCoordTime (c_in, c_out, seq_len=None, nf=32, nb_filters=None,
+              flatten=False, concat_pool=False, fc_dropout=0.0, bn=False,
+              y_range=None, custom_head=None, ks=40, bottleneck=True,
+              padding='same', coord=False, separable=False, dilation=1,
+              stride=1, conv_dropout=0.0, sa=False, se=None, norm='Batch',
+              zero_norm=False, bn_1st=True, act=<class
+              'torch.nn.modules.activation.ReLU'>, act_kwargs={})
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
from tsai.data.core import TSCategorize
+from tsai.models.utils import count_parameters
+
+
+
bs = 16
+n_vars = 3
+seq_len = 51
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+
+test_eq(InceptionTimePlus(n_vars,c_out)(xb).shape, [bs, c_out])
+test_eq(InceptionTimePlus(n_vars,c_out,concat_pool=True)(xb).shape, [bs, c_out])
+test_eq(InceptionTimePlus(n_vars,c_out, bottleneck=False)(xb).shape, [bs, c_out])
+test_eq(InceptionTimePlus(n_vars,c_out, residual=False)(xb).shape, [bs, c_out])
+test_eq(InceptionTimePlus(n_vars,c_out, conv_dropout=.5)(xb).shape, [bs, c_out])
+test_eq(InceptionTimePlus(n_vars,c_out, stoch_depth=.5)(xb).shape, [bs, c_out])
+test_eq(InceptionTimePlus(n_vars, c_out, seq_len=seq_len, zero_norm=True, flatten=True)(xb).shape, [bs, c_out])
+test_eq(InceptionTimePlus(n_vars,c_out, coord=True, separable=True, 
+                          norm='Instance', zero_norm=True, bn_1st=False, fc_dropout=.5, sa=True, se=True, act=nn.PReLU, act_kwargs={})(xb).shape, [bs, c_out])
+test_eq(InceptionTimePlus(n_vars,c_out, coord=True, separable=True,
+                          norm='Instance', zero_norm=True, bn_1st=False, act=nn.PReLU, act_kwargs={})(xb).shape, [bs, c_out])
+test_eq(count_parameters(InceptionTimePlus(3, 2)), 455490)
+test_eq(count_parameters(InceptionTimePlus(6, 2, **{'coord': True, 'separable': True, 'zero_norm': True})), 77204)
+test_eq(count_parameters(InceptionTimePlus(3, 2, ks=40)), count_parameters(InceptionTimePlus(3, 2, ks=[9, 19, 39])))
+
+
+
bs = 16
+n_vars = 3
+seq_len = 51
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+
+model = InceptionTimePlus(n_vars, c_out)
+model(xb).shape
+test_eq(model[0](xb), model.backbone(xb))
+test_eq(model[1](model[0](xb)), model.head(model[0](xb)))
+test_eq(model[1].state_dict().keys(), model.head.state_dict().keys())
+test_eq(len(ts_splitter(model)), 2)
+
+
+
test_eq(check_bias(InceptionTimePlus(2,3, zero_norm=True), is_conv)[0].sum(), 0)
+test_eq(check_weight(InceptionTimePlus(2,3, zero_norm=True), is_bn)[0].sum(), 6)
+test_eq(check_weight(InceptionTimePlus(2,3), is_bn)[0], np.array([1., 1., 1., 1., 1., 1., 1., 1.]))
+
+
+
for i in range(10): InceptionTimePlus(n_vars,c_out,stoch_depth=0.8,depth=9,zero_norm=True)(xb)
+
+
+
net = InceptionTimePlus(2,3,**{'coord': True, 'separable': True, 'zero_norm': True})
+test_eq(check_weight(net, is_bn)[0], np.array([1., 1., 0., 1., 1., 0., 1., 1.]))
+net
+
+
InceptionTimePlus(
+  (backbone): Sequential(
+    (0): InceptionBlockPlus(
+      (inception): ModuleList(
+        (0): InceptionModulePlus(
+          (bottleneck): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (convs): ModuleList(
+            (0): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (2): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+          )
+          (mp_conv): Sequential(
+            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (concat): Concat(dim=1)
+          (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act): ReLU()
+        )
+        (1): InceptionModulePlus(
+          (bottleneck): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (convs): ModuleList(
+            (0): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (2): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+          )
+          (mp_conv): Sequential(
+            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (concat): Concat(dim=1)
+          (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act): ReLU()
+        )
+        (2): InceptionModulePlus(
+          (bottleneck): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (convs): ModuleList(
+            (0): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (2): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+          )
+          (mp_conv): Sequential(
+            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (concat): Concat(dim=1)
+          (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+        (3): InceptionModulePlus(
+          (bottleneck): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (convs): ModuleList(
+            (0): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (2): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+          )
+          (mp_conv): Sequential(
+            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (concat): Concat(dim=1)
+          (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act): ReLU()
+        )
+        (4): InceptionModulePlus(
+          (bottleneck): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (convs): ModuleList(
+            (0): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (2): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+          )
+          (mp_conv): Sequential(
+            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (concat): Concat(dim=1)
+          (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act): ReLU()
+        )
+        (5): InceptionModulePlus(
+          (bottleneck): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (convs): ModuleList(
+            (0): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+            (2): ConvBlock(
+              (0): AddCoords1d()
+              (1): SeparableConv1d(
+                (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)
+                (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+              )
+            )
+          )
+          (mp_conv): Sequential(
+            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+            (1): ConvBlock(
+              (0): AddCoords1d()
+              (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (concat): Concat(dim=1)
+          (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+      )
+      (shortcut): ModuleList(
+        (0): ConvBlock(
+          (0): AddCoords1d()
+          (1): Conv1d(3, 128, kernel_size=(1,), stride=(1,), bias=False)
+          (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (act): ModuleList(
+        (0): ReLU()
+        (1): ReLU()
+      )
+      (add): Add
+    )
+  )
+  (head): Sequential(
+    (0): Sequential(
+      (0): GAP1d(
+        (gap): AdaptiveAvgPool1d(output_size=1)
+        (flatten): Reshape(bs)
+      )
+      (1): LinBnDrop(
+        (0): Linear(in_features=128, out_features=3, bias=True)
+      )
+    )
+  )
+)
+
+
+
+

source

+
+
+

MultiInceptionTimePlus

+
+
 MultiInceptionTimePlus (feat_list, c_out, seq_len=None, nf=32,
+                         nb_filters=None, depth=6, stoch_depth=1.0,
+                         flatten=False, concat_pool=False, fc_dropout=0.0,
+                         bn=False, y_range=None, custom_head=None)
+
+

Class that allows you to create a model with multiple branches of InceptionTimePlus.

+
+
bs = 16
+n_vars = 3
+seq_len = 51
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+
+test_eq(count_parameters(MultiInceptionTimePlus([1,1,1], c_out)) > count_parameters(MultiInceptionTimePlus(3, c_out)), True)
+test_eq(MultiInceptionTimePlus([1,1,1], c_out).to(xb.device)(xb).shape, MultiInceptionTimePlus(3, c_out).to(xb.device)(xb).shape)
+
+
[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.
+
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+c_out = 10
+xb = torch.rand(bs, n_vars, seq_len)
+new_head = partial(conv_lin_nd_head, d=(5,2))
+net = MultiInceptionTimePlus(n_vars, c_out, seq_len, custom_head=new_head)
+print(net.to(xb.device)(xb).shape)
+net.head
+
+
torch.Size([16, 5, 2, 10])
+
+
+
Sequential(
+  (0): create_conv_lin_nd_head(
+    (0): Conv1d(128, 10, kernel_size=(1,), stride=(1,))
+    (1): Linear(in_features=12, out_features=10, bias=True)
+    (2): Transpose(-1, -2)
+    (3): Reshape(bs, 5, 2, 10)
+  )
+)
+
+
+
+
bs = 16
+n_vars = 6
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+net = MultiInceptionTimePlus([1,2,3], c_out, seq_len)
+print(net.to(xb.device)(xb).shape)
+net.head
+
+
torch.Size([16, 2])
+
+
+
Sequential(
+  (0): Sequential(
+    (0): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Reshape(bs)
+    )
+    (1): LinBnDrop(
+      (0): Linear(in_features=384, out_features=2, bias=True)
+    )
+  )
+)
+
+
+
+
bs = 8
+c_in = 7  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 10
+xb2 = torch.randn(bs, c_in, seq_len)
+model1 = MultiInceptionTimePlus([2, 5], c_out, seq_len)
+model2 = MultiInceptionTimePlus([[0,2,5], [0,1,3,4,6]], c_out, seq_len)
+test_eq(model1.to(xb2.device)(xb2).shape, (bs, c_out))
+test_eq(model1.to(xb2.device)(xb2).shape, model2.to(xb2.device)(xb2).shape)
+
+
+
from tsai.data.external import *
+from tsai.data.core import *
+from tsai.data.preprocessing import *
+
+
+
X, y, splits = get_UCR_data('NATOPS', split_data=False)
+tfms  = [None, [TSCategorize()]]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+model = InceptionTimePlus(dls.vars, dls.c, dls.len)
+xb,yb=first(dls.train)
+test_eq(model.to(xb.device)(xb).shape, (dls.bs, dls.c))
+test_eq(count_parameters(model), 460038)
+
+
+
X, y, splits = get_UCR_data('NATOPS', split_data=False)
+tfms  = [None, [TSCategorize()]]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+model = MultiInceptionTimePlus([4, 15, 5], dls.c, dls.len)
+xb,yb=first(dls.train)
+test_eq(model.to(xb.device)(xb).shape, (dls.bs, dls.c))
+test_eq(count_parameters(model), 1370886)
+
\ No newline at end of file
diff --git a/models.layers.html b/models.layers.html new file mode 100644 index 000000000..d9829914c --- /dev/null +++ b/models.layers.html @@ -0,0 +1,3574 @@
+tsai - Layers

Layers

+
+

Helper functions used to build PyTorch timeseries models.

+
+
+

source

+
+

test_module_to_torchscript

+
+
 test_module_to_torchscript (m:torch.nn.modules.module.Module,
+                             inputs:torch.Tensor, trace:bool=True,
+                             script:bool=True, serialize:bool=True,
+                             verbose:bool=True)
+
+

Tests if a PyTorch module can be correctly traced or scripted and serialized

TypeDefaultDetails
mModuleThe PyTorch module to be tested.
inputsTensorA tensor or tuple of tensors representing the inputs to the model.
traceboolTrueIf True, attempts to trace the model. Defaults to True.
scriptboolTrueIf True, attempts to script the model. Defaults to True.
serializeboolTrueIf True, saves and loads the traced/scripted module to ensure it can be serialized. Defaults to True.
verboseboolTrueIf True, prints detailed information about the tracing and scripting process. Defaults to True.
+
+
m = nn.Linear(10, 2)
+inp = torch.randn(3, 10)
+test_module_to_torchscript(m, inp, trace=True, script=True, serialize=True, verbose=True)
+
+
output.shape: torch.Size([3, 2])
+Tracing...
+...Linear has been successfully traced 😃
+
+
+
+
True
+
+
+
+

source

+
+
+

init_lin_zero

+
+
 init_lin_zero (m)
+
+
+

source

+
+
+

SwishBeta

+
+
 SwishBeta ()
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
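No example is shown for SwishBeta, so here is a minimal sketch. Assumptions: the layer follows the usual Swish form x · sigmoid(beta · x) with a learnable beta, is applied elementwise (shape preserving), and is importable from tsai.models.layers:

import torch
from tsai.models.layers import SwishBeta  # assumed import path for this page's layers

act = SwishBeta()
x = torch.randn(8, 3, 10)
print(act(x).shape)  # elementwise activation, so the shape is expected to be unchanged: torch.Size([8, 3, 10])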

source

+
+
+

SmeLU

+
+
 SmeLU (beta:float=2.0)
+
+

Smooth ReLU activation function based on https://arxiv.org/pdf/2202.06499.pdf

TypeDefaultDetails
betafloat2.0Beta value
ReturnsNone
+
+
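A quick sketch of SmeLU as an elementwise activation. Assumptions: it is importable from tsai.models.layers and follows the piecewise form of the referenced paper (0 for x ≤ -beta, a smooth quadratic blend for |x| < beta, identity for x ≥ beta):

import torch
from tsai.models.layers import SmeLU  # assumed import path

act = SmeLU(beta=2.)
x = torch.linspace(-4, 4, 9)
out = act(x)
print(out)        # ~0 for x <= -2, ~x for x >= 2, smooth quadratic in between (per the paper)
print(out.shape)  # elementwise, so torch.Size([9])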

source

+
+
+

Chomp1d

+
+
 Chomp1d (chomp_size)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+
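The generic docstring above does not describe what Chomp1d actually does. As a hedged sketch, it is assumed to behave like the standard TCN chomp layer, trimming the trailing chomp_size steps introduced by causal padding:

import torch
from tsai.models.layers import Chomp1d  # assumed import path

x = torch.randn(2, 3, 10)
print(Chomp1d(2)(x).shape)  # expected under the TCN-style assumption: torch.Size([2, 3, 8])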

source

+
+
+

SameConv1d

+
+
 SameConv1d (ni, nf, ks=3, stride=1, dilation=1, **kwargs)
+
+

Conv1d with padding=‘same’

+
+
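A minimal shape check for SameConv1d, assuming that, as its name and description suggest, 'same' padding keeps the sequence length unchanged for stride=1:

import torch
from tsai.models.layers import SameConv1d  # assumed import path

bs, c_in, c_out, seq_len = 2, 3, 5, 50
t = torch.rand(bs, c_in, seq_len)
print(SameConv1d(c_in, c_out, ks=3)(t).shape)  # expected: torch.Size([2, 5, 50])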

source

+
+
+

Pad1d

+
+
 Pad1d (padding, value=0.0)
+
+

Pads the input tensor boundaries with a constant value.

+

For N-dimensional padding, use :func:torch.nn.functional.pad().

+

Args: padding (int, tuple): the size of the padding. If is int, uses the same padding in both boundaries. If a 2-tuple, uses (:math:\text{padding\_left}, :math:\text{padding\_right})

+

Shape: - Input: :math:(C, W_{in}) or :math:(N, C, W_{in}). - Output: :math:(C, W_{out}) or :math:(N, C, W_{out}), where

+
  :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
+

Examples::

+
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
+>>> m = nn.ConstantPad1d(2, 3.5)
+>>> input = torch.randn(1, 2, 4)
+>>> input
+tensor([[[-1.0491, -0.7152, -0.0749,  0.8530],
+         [-1.3287,  1.8966,  0.1466, -0.2771]]])
+>>> m(input)
+tensor([[[ 3.5000,  3.5000, -1.0491, -0.7152, -0.0749,  0.8530,  3.5000,
+           3.5000],
+         [ 3.5000,  3.5000, -1.3287,  1.8966,  0.1466, -0.2771,  3.5000,
+           3.5000]]])
+>>> m = nn.ConstantPad1d(2, 3.5)
+>>> input = torch.randn(1, 2, 3)
+>>> input
+tensor([[[ 1.6616,  1.4523, -1.1255],
+         [-3.6372,  0.1182, -1.8652]]])
+>>> m(input)
+tensor([[[ 3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000,  3.5000],
+         [ 3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000,  3.5000]]])
+>>> # using different paddings for different sides
+>>> m = nn.ConstantPad1d((3, 1), 3.5)
+>>> m(input)
+tensor([[[ 3.5000,  3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000],
+         [ 3.5000,  3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000]]])
+
+

source

+
+
+

same_padding1d

+
+
 same_padding1d (seq_len, ks, stride=1, dilation=1)
+
+

Same padding formula as used in Tensorflow

+
+

source

+
+
+

Conv2d

+
+
 Conv2d (ni, nf, kernel_size=None, ks=None, stride=1, padding='same',
+         dilation=1, init='auto', bias_std=0.01, **kwargs)
+
+

conv2d layer with padding=‘same’, ‘valid’, or any integer (defaults to ‘same’)

+
+

source

+
+
+

Conv2dSame

+
+
 Conv2dSame (ni, nf, ks=(3, 3), stride=(1, 1), dilation=(1, 1), **kwargs)
+
+

Conv2d with padding=‘same’

+
+

source

+
+
+

Pad2d

+
+
 Pad2d (padding, value=0.0)
+
+

Pads the input tensor boundaries with a constant value.

+

For N-dimensional padding, use :func:torch.nn.functional.pad().

+

Args: padding (int, tuple): the size of the padding. If is int, uses the same padding in all boundaries. If a 4-tuple, uses (:math:\text{padding\_left}, :math:\text{padding\_right}, :math:\text{padding\_top}, :math:\text{padding\_bottom})

+

Shape: - Input: :math:(N, C, H_{in}, W_{in}) or :math:(C, H_{in}, W_{in}). - Output: :math:(N, C, H_{out}, W_{out}) or :math:(C, H_{out}, W_{out}), where

+
  :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
+
+  :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
+

Examples::

+
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
+>>> m = nn.ConstantPad2d(2, 3.5)
+>>> input = torch.randn(1, 2, 2)
+>>> input
+tensor([[[ 1.6585,  0.4320],
+         [-0.8701, -0.4649]]])
+>>> m(input)
+tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
+         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
+         [ 3.5000,  3.5000,  1.6585,  0.4320,  3.5000,  3.5000],
+         [ 3.5000,  3.5000, -0.8701, -0.4649,  3.5000,  3.5000],
+         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
+         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
+>>> # using different paddings for different sides
+>>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
+>>> m(input)
+tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
+         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
+         [ 3.5000,  3.5000,  3.5000,  1.6585,  0.4320],
+         [ 3.5000,  3.5000,  3.5000, -0.8701, -0.4649],
+         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
+
+

source

+
+
+

same_padding2d

+
+
 same_padding2d (H, W, ks, stride=(1, 1), dilation=(1, 1))
+
+

Same padding formula as used in Tensorflow

+
+
bs = 2
+c_in = 3
+c_out = 5
+h = 16
+w = 20
+t = torch.rand(bs, c_in, h, w)
+test_eq(Conv2dSame(c_in, c_out, ks=3, stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
+test_eq(Conv2dSame(c_in, c_out, ks=(3, 1), stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
+test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(1, 1), dilation=(2, 2), bias=False)(t).shape, (bs, c_out, h, w))
+test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(2, 2), dilation=(1, 1), bias=False)(t).shape, (bs, c_out, h//2, w//2))
+test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(2, 2), dilation=(2, 2), bias=False)(t).shape, (bs, c_out, h//2, w//2))
+test_eq(Conv2d(c_in, c_out, ks=3, padding='same', stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
+
+
+

source

+
+
+

CausalConv1d

+
+
 CausalConv1d (ni, nf, ks, stride=1, dilation=1, groups=1, bias=True)
+
+

Applies a 1D convolution over an input signal composed of several input planes.

+

In the simplest case, the output value of the layer with input size :math:(N, C_{\text{in}}, L) and output :math:(N, C_{\text{out}}, L_{\text{out}}) can be precisely described as:

+

.. math:: \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) + \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)

+

where :math:\star is the valid cross-correlation_ operator, :math:N is a batch size, :math:C denotes a number of channels, :math:L is a length of signal sequence.

+

This module supports :ref:TensorFloat32<tf32_on_ampere>.

+

On certain ROCm devices, when using float16 inputs this module will use :ref:different precision<fp16_on_mi200> for backward.

+
  • :attr:stride controls the stride for the cross-correlation, a single number or a one-element tuple.

  • :attr:padding controls the amount of padding applied to the input. It can be either a string {‘valid’, ‘same’} or a tuple of ints giving the amount of implicit padding applied on both sides.

  • :attr:dilation controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this link_ has a nice visualization of what :attr:dilation does.

  • :attr:groups controls the connections between inputs and outputs. :attr:in_channels and :attr:out_channels must both be divisible by :attr:groups. For example,

      • At groups=1, all inputs are convolved to all outputs.
      • At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels and producing half the output channels, and both subsequently concatenated.
      • At groups= :attr:in_channels, each input channel is convolved with its own set of filters (of size :math:\frac{\text{out\_channels}}{\text{in\_channels}}).
+

Note: When groups == in_channels and out_channels == K * in_channels, where K is a positive integer, this operation is also known as a “depthwise convolution”.

+
In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
+a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
+:math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`.
+

Note: In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. See :doc:/notes/randomness for more information.

+

Note: padding='valid' is the same as no padding. padding='same' pads the input so the output has the same shape as the input. However, this mode doesn’t support any stride values other than 1.

+

Note: This module supports complex data types i.e. complex32, complex64, complex128.

+

Args:
    in_channels (int): Number of channels in the input image
    out_channels (int): Number of channels produced by the convolution
    kernel_size (int or tuple): Size of the convolving kernel
    stride (int or tuple, optional): Stride of the convolution. Default: 1
    padding (int, tuple or str, optional): Padding added to both sides of the input. Default: 0
    padding_mode (str, optional): 'zeros', 'reflect', 'replicate' or 'circular'. Default: 'zeros'
    dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
    groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
    bias (bool, optional): If True, adds a learnable bias to the output. Default: True

+

Shape: - Input: :math:(N, C_{in}, L_{in}) or :math:(C_{in}, L_{in}) - Output: :math:(N, C_{out}, L_{out}) or :math:(C_{out}, L_{out}), where

+
  .. math::
+      L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation}
+                \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
+

Attributes:
    weight (Tensor): the learnable weights of the module of shape :math:(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size}). The values of these weights are sampled from :math:\mathcal{U}(-\sqrt{k}, \sqrt{k}) where :math:k = \frac{groups}{C_\text{in} * \text{kernel\_size}}
    bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:bias is True, then the values of these weights are sampled from :math:\mathcal{U}(-\sqrt{k}, \sqrt{k}) where :math:k = \frac{groups}{C_\text{in} * \text{kernel\_size}}

+

Examples::

+
>>> m = nn.Conv1d(16, 33, 3, stride=2)
+>>> input = torch.randn(20, 16, 50)
+>>> output = m(input)
+

.. _cross-correlation: https://en.wikipedia.org/wiki/Cross-correlation

+

.. _link: https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

+
+

source

+
+
+

Conv1d

+
+
 Conv1d (ni, nf, kernel_size=None, ks=None, stride=1, padding='same',
+         dilation=1, init='auto', bias_std=0.01, **kwargs)
+
+

conv1d layer with padding=‘same’, ‘causal’, ‘valid’, or any integer (defaults to ‘same’)

+
+
bs = 2
+c_in = 3
+c_out = 5
+seq_len = 512
+t = torch.rand(bs, c_in, seq_len)
+dilation = 1
+test_eq(CausalConv1d(c_in, c_out, ks=3, dilation=dilation)(t).shape, Conv1d(c_in, c_out, ks=3, padding="same", dilation=dilation)(t).shape)
+dilation = 2
+test_eq(CausalConv1d(c_in, c_out, ks=3, dilation=dilation)(t).shape, Conv1d(c_in, c_out, ks=3, padding="same", dilation=dilation)(t).shape)
+
+
+
bs = 2
+ni = 3
+nf = 5
+seq_len = 6
+ks = 3
+t = torch.rand(bs, c_in, seq_len)
+test_eq(Conv1d(ni, nf, ks, padding=0)(t).shape, (bs, c_out, seq_len - (2 * (ks//2))))
+test_eq(Conv1d(ni, nf, ks, padding='valid')(t).shape, (bs, c_out, seq_len - (2 * (ks//2))))
+test_eq(Conv1d(ni, nf, ks, padding='same')(t).shape, (bs, c_out, seq_len))
+test_eq(Conv1d(ni, nf, ks, padding='causal')(t).shape, (bs, c_out, seq_len))
+test_error('use kernel_size or ks but not both simultaneously', Conv1d, ni, nf, kernel_size=3, ks=3)
+test_error('you need to pass a ks', Conv1d, ni, nf)
+
+
+
conv = Conv1d(ni, nf, ks, padding='same')
+init_linear(conv, None, init='auto', bias_std=.01)
+conv
+
+
Conv1d(3, 5, kernel_size=(3,), stride=(1,), padding=(1,))
+
+
+
+
conv = Conv1d(ni, nf, ks, padding='causal')
+init_linear(conv, None, init='auto', bias_std=.01)
+conv
+
+
CausalConv1d(3, 5, kernel_size=(3,), stride=(1,))
+
+
+
+
conv = Conv1d(ni, nf, ks, padding='valid')
+init_linear(conv, None, init='auto', bias_std=.01)
+weight_norm(conv)
+conv
+
+
Conv1d(3, 5, kernel_size=(3,), stride=(1,))
+
+
+
+
conv = Conv1d(ni, nf, ks, padding=0)
+init_linear(conv, None, init='auto', bias_std=.01)
+weight_norm(conv)
+conv
+
+
Conv1d(3, 5, kernel_size=(3,), stride=(1,))
+
+
+
+

source

+
+
+

SeparableConv1d

+
+
 SeparableConv1d (ni, nf, ks, stride=1, padding='same', dilation=1,
+                  bias=True, bias_std=0.01)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 64
+c_in = 6
+c_out = 5
+seq_len = 512
+t = torch.rand(bs, c_in, seq_len)
+test_eq(SeparableConv1d(c_in, c_out, 3)(t).shape, (bs, c_out, seq_len))
+
+
+

source

+
+
+

AddCoords1d

+
+
 AddCoords1d ()
+
+

Add coordinates to ease position identification without modifying mean and std

+
+
bs = 2
+c_in = 3
+c_out = 5
+seq_len = 50
+
+t = torch.rand(bs, c_in, seq_len)
+t = (t - t.mean()) / t.std()
+test_eq(AddCoords1d()(t).shape, (bs, c_in + 1, seq_len))
+new_t = AddCoords1d()(t)
+test_close(new_t.mean(),0, 1e-2)
+test_close(new_t.std(), 1, 1e-2)
+
+
+

source

+
+
+

ConvBlock

+
+
 ConvBlock (ni, nf, kernel_size=None, ks=3, stride=1, padding='same',
+            bias=None, bias_std=0.01, norm='Batch', zero_norm=False,
+            bn_1st=True, act=<class 'torch.nn.modules.activation.ReLU'>,
+            act_kwargs={}, init='auto', dropout=0.0, xtra=None,
+            coord=False, separable=False, **kwargs)
+
+

Create a sequence of conv1d (ni to nf), activation (if act_cls) and norm_type layers.

+
+

source

+
+
+

ResBlock1dPlus

+
+
 ResBlock1dPlus (expansion, ni, nf, coord=False, stride=1, groups=1,
+                 reduction=None, nh1=None, nh2=None, dw=False, g2=1,
+                 sa=False, sym=False, norm='Batch', zero_norm=True,
+                 act_cls=<class 'torch.nn.modules.activation.ReLU'>, ks=3,
+                 pool=<function AvgPool>, pool_first=True, **kwargs)
+
+

Resnet block from ni to nh with stride

+
+
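No example is shown for ResBlock1dPlus. The following is only a sketch under the assumption that, like fastai's ResBlock, it expects ni*expansion input channels and returns nf*expansion output channels, so with expansion=1, ni=nf and stride=1 the input shape is preserved:

import torch
from tsai.models.layers import ResBlock1dPlus  # assumed import path

t = torch.rand(8, 32, 50)
block = ResBlock1dPlus(1, 32, 32, ks=3)  # expansion=1, ni=nf=32 (assumed shape-preserving setup)
print(block(t).shape)  # expected under these assumptions: torch.Size([8, 32, 50])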

source

+
+
+

SEModule1d

+
+
 SEModule1d (ni, reduction=16, act=<class
+             'torch.nn.modules.activation.ReLU'>, act_kwargs={})
+
+

Squeeze and excitation module for 1d

+
+
t = torch.rand(8, 32, 12)
+test_eq(SEModule1d(t.shape[1], 16, act=nn.ReLU, act_kwargs={})(t).shape, t.shape)
+
+
+

source

+
+
+

Norm

+
+
 Norm (nf, ndim=1, norm='Batch', zero_norm=False, init=True, **kwargs)
+
+

Norm layer with nf features and ndim with auto init.

+
+
bs = 2
+ni = 3
+nf = 5
+sl = 4
+ks = 5
+
+t = torch.rand(bs, ni, sl)
+test_eq(ConvBlock(ni, nf, ks)(t).shape, (bs, nf, sl))
+test_eq(ConvBlock(ni, nf, ks, padding='causal')(t).shape, (bs, nf, sl))
+test_eq(ConvBlock(ni, nf, ks, coord=True)(t).shape, (bs, nf, sl))
+
+
+
test_eq(BN1d(ni)(t).shape, (bs, ni, sl))
+test_eq(BN1d(ni).weight.data.mean().item(), 1.)
+test_eq(BN1d(ni, zero_norm=True).weight.data.mean().item(), 0.)
+
+
+
test_eq(ConvBlock(ni, nf, ks, norm='batch', zero_norm=True)[1].weight.data.unique().item(), 0)
+test_ne(ConvBlock(ni, nf, ks, norm='batch', zero_norm=False)[1].weight.data.unique().item(), 0)
+test_eq(ConvBlock(ni, nf, ks, bias=False)[0].bias, None)
+ConvBlock(ni, nf, ks, act=Swish, coord=True)
+
+
ConvBlock(
+  (0): AddCoords1d()
+  (1): Conv1d(4, 5, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
+  (2): BatchNorm1d(5, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (3): Swish()
+)
+
+
+
+

source

+
+
+

LinLnDrop

+
+
 LinLnDrop (n_in, n_out, ln=True, p=0.0, act=None, lin_first=False)
+
+

Module grouping LayerNorm1d, Dropout and Linear layers

+
+
LinLnDrop(2, 3, p=.5)
+
+
LinLnDrop(
+  (0): LayerNorm((2,), eps=1e-05, elementwise_affine=True)
+  (1): Dropout(p=0.5, inplace=False)
+  (2): Linear(in_features=2, out_features=3, bias=False)
+)
+
+
+
+

source

+
+
+

LambdaPlus

+
+
 LambdaPlus (func, *args, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

ReZero

+
+
 ReZero (module)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Clip

+
+
 Clip (min=None, max=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Clamp

+
+
 Clamp (min=None, max=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

SoftMax

+
+
 SoftMax (dim=-1)
+
+

SoftMax layer

+
+
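A one-line sanity check, assuming the SoftMax layer simply applies a softmax along dim so the values on that axis are positive and sum to 1:

import torch
from tsai.models.layers import SoftMax  # assumed import path

x = torch.randn(2, 3)
print(SoftMax(dim=-1)(x).sum(-1))  # expected: values close to 1 for each row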

source

+
+
+

LastStep

+
+
 LastStep ()
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
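LastStep is used by create_rnn_head further down this page. As a sketch, assuming it simply selects the final time step (x[..., -1]):

import torch
from tsai.models.layers import LastStep  # assumed import path

t = torch.rand(2, 3, 4)
print(LastStep()(t).shape)  # expected: torch.Size([2, 3])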

source

+
+
+

Max

+
+
 Max (dim=None, keepdim=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Reshape

+
+
 Reshape (*shape)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

View

+
+
 View (*shape)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Transpose

+
+
 Transpose (*dims, contiguous=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Permute

+
+
 Permute (*dims)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Unfold

+
+
 Unfold (dim, size, step=1)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Concat

+
+
 Concat (dim=1)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Add

+
+
 Add ()
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Unsqueeze

+
+
 Unsqueeze (dim=-1)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

Squeeze

+
+
 Squeeze (dim=-1)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 2
+nf = 5
+sl = 4
+
+t = torch.rand(bs, nf, sl)
+test_eq(Permute(0,2,1)(t).shape, (bs, sl, nf))
+test_eq(Max(1)(t).shape, (bs, sl))
+test_eq(Transpose(1,2)(t).shape, (bs, sl, nf))
+test_eq(Transpose(1,2, contiguous=True)(t).shape, (bs, sl, nf))
+test_eq(View(-1, 2, 10)(t).shape, (bs, 1, 2, 10))
+test_eq(Reshape(-1, 2, 10)(t).shape, (bs, 1, 2, 10))
+test_eq(Reshape()(t).shape, (2, 20))
+test_eq(Reshape(-1)(t).shape, (40,))
+Transpose(1,2), Permute(0,2,1), View(-1, 2, 10), Transpose(1,2, contiguous=True), Reshape(-1, 2, 10), Noop
+
+
(Transpose(1, 2),
+ Permute(dims=0, 2, 1),
+ View(bs, -1, 2, 10),
+ Transpose(dims=1, 2).contiguous(),
+ Reshape(bs, -1, 2, 10),
+ Sequential())
+
+
+
+

source

+
+
+

DropPath

+
+
 DropPath (p=None)
+
+

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

+

It’s similar to Dropout but it drops individual connections instead of nodes. Original code in https://github.com/rwightman/pytorch-image-models (timm library)

+
+
t = torch.ones(100,2,3)
+test_eq(DropPath(0.)(t), t)
+assert DropPath(0.5)(t).max() >= 1
+
+
+

source

+
+
+

Sharpen

+
+
 Sharpen (T=0.5)
+
+

This is used to increase confidence in predictions - MixMatch paper

+
+
n_samples = 1000
+n_classes = 3
+
+t = (torch.rand(n_samples, n_classes) - .5) * 10
+probas = F.softmax(t, -1)
+sharpened_probas = Sharpen()(probas)
+plt.plot(probas.flatten().sort().values, color='r')
+plt.plot(sharpened_probas.flatten().sort().values, color='b')
+plt.show()
+test_gt(sharpened_probas[n_samples//2:].max(-1).values.sum().item(), probas[n_samples//2:].max(-1).values.sum().item())
+
+
+
+

+
+
+
+
+
+

source

+
+
+

Sequential

+
+
 Sequential (*args)
+
+

Class that allows you to pass one or multiple inputs

+
+

source

+
+
+

TimeDistributed

+
+
 TimeDistributed (module, batch_first=False)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+
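The docstring above is the generic nn.Module one. As a hedged sketch, TimeDistributed is assumed to apply the wrapped module independently at every time step; with batch_first=True the input is taken as (bs, seq_len, features):

import torch, torch.nn as nn
from tsai.models.layers import TimeDistributed  # assumed import path

tdist = TimeDistributed(nn.Linear(8, 4), batch_first=True)
x = torch.randn(16, 10, 8)   # (bs, seq_len, features)
print(tdist(x).shape)        # expected under this assumption: torch.Size([16, 10, 4])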

source

+
+
+

get_calibrator

+
+
 get_calibrator (calibrator=None, n_classes=1, **kwargs)
+
+
+

source

+
+
+

Matrix_Scale

+
+
 Matrix_Scale (n_classes=1, dirichlet=False)
+
+

Used to perform Matrix Scaling (dirichlet=False) or Dirichlet calibration (dirichlet=True)

+
+

source

+
+
+

Vector_Scale

+
+
 Vector_Scale (n_classes=1, dirichlet=False)
+
+

Used to perform Vector Scaling (dirichlet=False) or Diagonal Dirichlet calibration (dirichlet=True)

+
+

source

+
+
+

Temp_Scale

+
+
 Temp_Scale (temp=1.0, dirichlet=False)
+
+

Used to perform Temperature Scaling (dirichlet=False) or Single-parameter Dirichlet calibration (dirichlet=True)

+
+
bs = 2
+c_out = 3
+
+t = torch.rand(bs, c_out)
+for calibrator, cal_name in zip(['temp', 'vector', 'matrix'], ['Temp_Scale', 'Vector_Scale', 'Matrix_Scale']): 
+    cal = get_calibrator(calibrator, n_classes=c_out)
+#     print(calibrator)
+#     print(cal.weight, cal.bias, '\n')
+    test_eq(cal(t), t)
+    test_eq(cal.__class__.__name__, cal_name)
+for calibrator, cal_name in zip(['dtemp', 'dvector', 'dmatrix'], ['Temp_Scale', 'Vector_Scale', 'Matrix_Scale']):
+    cal = get_calibrator(calibrator, n_classes=c_out)
+#     print(calibrator)
+#     print(cal.weight, cal.bias, '\n')
+    test_eq(cal(t), F.log_softmax(t, dim=1))
+    test_eq(cal.__class__.__name__, cal_name)
+
+
+
bs = 2
+c_out = 3
+
+t = torch.rand(bs, c_out)
+
+test_eq(Temp_Scale()(t).shape, t.shape)
+test_eq(Vector_Scale(c_out)(t).shape, t.shape)
+test_eq(Matrix_Scale(c_out)(t).shape, t.shape)
+test_eq(Temp_Scale(dirichlet=True)(t).shape, t.shape)
+test_eq(Vector_Scale(c_out, dirichlet=True)(t).shape, t.shape)
+test_eq(Matrix_Scale(c_out, dirichlet=True)(t).shape, t.shape)
+
+test_eq(Temp_Scale()(t), t)
+test_eq(Vector_Scale(c_out)(t), t)
+test_eq(Matrix_Scale(c_out)(t), t)
+
+
+
bs = 2
+c_out = 5
+
+t = torch.rand(bs, c_out)
+test_eq(Vector_Scale(c_out)(t), t)
+test_eq(Vector_Scale(c_out).weight.data, torch.ones(c_out))
+test_eq(Vector_Scale(c_out).weight.requires_grad, True)
+test_eq(type(Vector_Scale(c_out).weight), torch.nn.parameter.Parameter)
+
+
+
bs = 2
+c_out = 3
+weight = 2
+bias = 1
+
+t = torch.rand(bs, c_out)
+test_eq(Matrix_Scale(c_out)(t).shape, t.shape)
+test_eq(Matrix_Scale(c_out).weight.requires_grad, True)
+test_eq(type(Matrix_Scale(c_out).weight), torch.nn.parameter.Parameter)
+
+
+

source

+
+
+

LogitAdjustmentLayer

+
+
 LogitAdjustmentLayer (class_priors)
+
+

Logit Adjustment for imbalanced datasets

+
+
bs, n_classes = 16, 3
+class_priors = torch.rand(n_classes)
+logits = torch.randn(bs, n_classes) * 2
+test_eq(LogitAdjLayer(class_priors)(logits), logits + class_priors)
+
+
+

source

+
+
+

MaxPPVPool1d

+
+
 MaxPPVPool1d ()
+
+

Drop-in replacement for AdaptiveConcatPool1d - multiplies nf by 2

+
+

source

+
+
+

PPAuc

+
+
 PPAuc (dim=-1)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

PPV

+
+
 PPV (dim=-1)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 2
+nf = 5
+sl = 4
+
+t = torch.rand(bs, nf, sl)
+test_eq(MaxPPVPool1d()(t).shape, (bs, nf*2, 1))
+test_eq(MaxPPVPool1d()(t).shape, AdaptiveConcatPool1d(1)(t).shape)
+
+
+

source

+
+
+

AdaptiveWeightedAvgPool1d

+
+
 AdaptiveWeightedAvgPool1d (n_in, seq_len, mult=2, n_layers=2, ln=False,
+                            dropout=0.5, act=ReLU(), zero_init=True)
+
+

Global Pooling layer that performs a weighted average along the temporal axis

+

It can be considered as a channel-wise form of local temporal attention. Inspired by the paper: Hyun, J., Seong, H., & Kim, E. (2019). Universal Pooling–A New Pooling Method for Convolutional Neural Networks. arXiv preprint arXiv:1907.11440.

+
+
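A shape-only sketch, assuming AdaptiveWeightedAvgPool1d reduces the temporal axis to length 1 (consistent with GAWP1d below, which adds a Flatten on top of it):

import torch
from tsai.models.layers import AdaptiveWeightedAvgPool1d  # assumed import path

bs, n_in, seq_len = 16, 64, 50
t = torch.rand(bs, n_in, seq_len)
pool = AdaptiveWeightedAvgPool1d(n_in, seq_len)
print(pool(t).shape)  # expected under this assumption: torch.Size([16, 64, 1])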

source

+
+
+

GAWP1d

+
+
 GAWP1d (n_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=ReLU(),
+         zero_init=False)
+
+

Global AdaptiveWeightedAvgPool1d + Flatten

+
+

source

+
+
+

GACP1d

+
+
 GACP1d (output_size=1)
+
+

Global AdaptiveConcatPool + Flatten

+
+

source

+
+
+

GAP1d

+
+
 GAP1d (output_size=1)
+
+

Global Adaptive Pooling + Flatten

+
+

source

+
+
+

gwa_pool_head

+
+
 gwa_pool_head (n_in, c_out, seq_len, bn=True, fc_dropout=0.0)
+
+
+

source

+
+
+

GlobalWeightedAveragePool1d

+
+
 GlobalWeightedAveragePool1d (n_in, seq_len)
+
+

Global Weighted Average Pooling layer

+

Inspired by Building Efficient CNN Architecture for Offline Handwritten Chinese Character Recognition https://arxiv.org/pdf/1804.01259.pdf

+
+
t = torch.randn(16, 64, 50)
+head = gwa_pool_head(64, 5, 50)
+test_eq(head(t).shape, (16, 5))
+
+
+

source

+
+
+

attentional_pool_head

+
+
 attentional_pool_head (n_in, c_out, seq_len=None, bn=True, **kwargs)
+
+
+

source

+
+
+

GAttP1d

+
+
 GAttP1d (n_in, c_out, bn=False)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

AttentionalPool1d

+
+
 AttentionalPool1d (n_in, c_out, bn=False)
+
+

Global Adaptive Pooling layer inspired by Attentional Pooling for Action Recognition https://arxiv.org/abs/1711.01467

+
+
bs, c_in, seq_len = 16, 1, 50
+c_out = 3
+t = torch.rand(bs, c_in, seq_len)
+test_eq(GAP1d()(t).shape, (bs, c_in))
+test_eq(GACP1d()(t).shape, (bs, c_in*2))
+bs, c_in, seq_len = 16, 4, 50
+t = torch.rand(bs, c_in, seq_len)
+test_eq(GAP1d()(t).shape, (bs, c_in))
+test_eq(GACP1d()(t).shape, (bs, c_in*2))
+test_eq(GAWP1d(c_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False)(t).shape, (bs, c_in))
+test_eq(GAWP1d(c_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False)(t).shape, (bs, c_in))
+test_eq(GAWP1d(c_in, seq_len, n_layers=1, ln=False, dropout=0.5, zero_init=False)(t).shape, (bs, c_in))
+test_eq(GAWP1d(c_in, seq_len, n_layers=1, ln=False, dropout=0.5, zero_init=True)(t).shape, (bs, c_in))
+test_eq(AttentionalPool1d(c_in, c_out)(t).shape, (bs, c_out, 1))
+
+
+
bs, c_in, seq_len = 16, 128, 50
+c_out = 14
+t = torch.rand(bs, c_in, seq_len)
+attp = attentional_pool_head(c_in, c_out)
+test_eq(attp(t).shape, (bs, c_out))
+
+
+

source

+
+
+

PoolingLayer

+
+
 PoolingLayer (method='cls', seq_len=None, token=True, seq_last=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
t = torch.arange(24).reshape(2, 3, 4).float()
+test_eq(PoolingLayer('cls', token=True, seq_last=True)(t), tensor([[ 0.,  4.,  8.], [12., 16., 20.]]))
+test_eq(PoolingLayer('max', token=True, seq_last=True)(t), tensor([[ 3.,  7., 11.], [15., 19., 23.]]))
+test_close(PoolingLayer('mean', token=True, seq_last=True)(t), tensor([[ 2.,  6., 10.], [14., 18., 22.]]))
+test_close(PoolingLayer('max-mean', token=True, seq_last=True)(t), tensor([[ 3.,  7., 11.,  2.,  6., 10.],
+                                                                           [15., 19., 23., 14., 18., 22.]]))
+test_close(PoolingLayer('flatten', token=True, seq_last=True)(t), tensor([[ 1.,  2.,  3.,  5.,  6.,  7.,  9., 10., 11.],
+                                                                          [13., 14., 15., 17., 18., 19., 21., 22., 23.]]))
+test_eq(PoolingLayer('linear', seq_len=4, token=True, seq_last=True)(t).shape, (2, 3))
+test_eq(PoolingLayer('max', token=False, seq_last=True)(t), tensor([[ 3.,  7., 11.], [15., 19., 23.]]))
+test_close(PoolingLayer('mean', token=False, seq_last=True)(t), tensor([[ 1.5000,  5.5000,  9.5000],
+                                                                        [13.5000, 17.5000, 21.5000]]))
+test_close(PoolingLayer('max-mean', token=False, seq_last=True)(t), tensor([[ 3.,  7., 11.,  1.5000,  5.5000,  9.5000],
+                                                                            [15., 19., 23., 13.5000, 17.5000, 21.5000]]))
+test_close(PoolingLayer('flatten', token=False, seq_last=True)(t), tensor([[ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., 10., 11.],
+                                                                           [12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23.]]))
+test_eq(PoolingLayer('linear', seq_len=4, token=False, seq_last=True)(t).shape, (2, 3))
+
+
+
t = torch.arange(24).reshape(2, 3, 4).swapaxes(1,2).float()
+test_eq(PoolingLayer('cls', token=True, seq_last=False)(t), tensor([[ 0.,  4.,  8.], [12., 16., 20.]]))
+test_eq(PoolingLayer('max', token=True, seq_last=False)(t), tensor([[ 3.,  7., 11.], [15., 19., 23.]]))
+test_close(PoolingLayer('mean', token=True, seq_last=False)(t), tensor([[ 2.,  6., 10.], [14., 18., 22.]]))
+test_close(PoolingLayer('max-mean', token=True, seq_last=False)(t), tensor([[ 3.,  7., 11.,  2.,  6., 10.],
+                                                                           [15., 19., 23., 14., 18., 22.]]))
+test_close(PoolingLayer('flatten', token=True, seq_last=False)(t), tensor([[ 1.,  5.,  9.,  2.,  6., 10.,  3.,  7., 11.],
+                                                                           [13., 17., 21., 14., 18., 22., 15., 19., 23.]]))
+t = torch.arange(24).reshape(2, 3, 4).swapaxes(1,2).float()
+test_eq(PoolingLayer('conv1d', seq_len=4, token=False, seq_last=False)(t).shape, (2, 3))
+test_eq(PoolingLayer('max', token=False, seq_last=False)(t), tensor([[ 3.,  7., 11.], [15., 19., 23.]]))
+test_close(PoolingLayer('mean', token=False, seq_last=False)(t), tensor([[ 1.5000,  5.5000,  9.5000],
+                                                                        [13.5000, 17.5000, 21.5000]]))
+test_close(PoolingLayer('max-mean', token=False, seq_last=False)(t), tensor([[ 3.,  7., 11.,  1.5000,  5.5000,  9.5000],
+                                                                            [15., 19., 23., 13.5000, 17.5000, 21.5000]]))
+test_close(PoolingLayer('flatten', token=False, seq_last=False)(t), tensor([[ 0.,  4.,  8.,  1.,  5.,  9.,  2.,  6., 10.,  3.,  7., 11.],
+                                                                            [12., 16., 20., 13., 17., 21., 14., 18., 22., 15., 19., 23.]]))
+test_eq(PoolingLayer('conv1d', seq_len=4, token=False, seq_last=False)(t).shape, (2, 3))
+
+
+

source

+
+
+

ReGLU

+
+
 ReGLU ()
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

GEGLU

+
+
 GEGLU ()
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
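Neither ReGLU nor GEGLU shows an example above. As a sketch, both are assumed to follow the standard GLU-variant recipe of splitting the last dimension in half and gating one half with the other (ReLU or GELU gate), so the last dimension is halved:

import torch
from tsai.models.layers import ReGLU, GEGLU  # assumed import path

x = torch.randn(2, 5, 8)
print(ReGLU()(x).shape, GEGLU()(x).shape)  # expected under this assumption: torch.Size([2, 5, 4]) twice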

source

+
+
+

get_act_fn

+
+
 get_act_fn (act, **act_kwargs)
+
+
+
test_eq(get_act_fn(nn.ReLU).__repr__(), "ReLU()")
+test_eq(get_act_fn(nn.ReLU()).__repr__(), "ReLU()")
+test_eq(get_act_fn(nn.LeakyReLU, negative_slope=0.05).__repr__(), "LeakyReLU(negative_slope=0.05)")
+test_eq(get_act_fn('reglu').__repr__(), "ReGLU()")
+test_eq(get_act_fn('leakyrelu', negative_slope=0.05).__repr__(), "LeakyReLU(negative_slope=0.05)")
+
+
+

source

+
+
+

RevIN

+
+
 RevIN (c_in:int, affine:bool=True, subtract_last:bool=False, dim:int=2,
+        eps:float=1e-05)
+
+

Reversible Instance Normalization layer adapted from

+

Kim, T., Kim, J., Tae, Y., Park, C., Choi, J. H., & Choo, J. (2021, September). Reversible instance normalization for accurate time-series forecasting against distribution shift. In International Conference on Learning Representations. Original code: https://github.com/ts-kim/RevIN

TypeDefaultDetails
c_inint#features (aka variables or channels)
affineboolTrueflag to indicate if RevIN has learnable weight and bias
subtract_lastboolFalse
dimint2int or tuple of dimensions used to calculate mean and std
epsfloat1e-05epsilon - parameter added for numerical stability
+
+

+
+
t = ((torch.rand(16, 5, 100) - .25) * torch.Tensor([.01, .1, 1, 10, 100]).reshape(1, -1, 1)).cumsum(-1)
+t_clone = t.clone()
+l = RevIN(5)
+t_norm = l(t, torch.tensor(True))
+t_denorm = l(t_norm, torch.tensor(False))
+test_close(t_clone, t_denorm, eps=1e-3)
+
+
+
model = RevIN(5, affine=True)
+try:
+    scripted_model = torch.jit.script(model)
+    file_path = f"test_scripted_model.pt"
+    torch.jit.save(scripted_model, file_path)
+    scripted_model = torch.jit.load(file_path)
+
+    inp = ((torch.rand(16, 5, 100) - .25) * torch.Tensor([.01, .1, 1, 10, 100]).reshape(1, -1, 1)).cumsum(-1)
+    normed_output = model(inp, torch.tensor(True))
+    denormed_output = model(normed_output, torch.tensor(False))
+    scripted_normed_output = scripted_model(inp, torch.tensor(True))
+    scripted_denormed_output = scripted_model(scripted_normed_output, torch.tensor(False))
+    test_close(normed_output, scripted_normed_output)
+    test_close(denormed_output, scripted_denormed_output)
+    os.remove(file_path)
+    del scripted_model
+    gc.collect()
+    print('scripting ok')
+except Exception as e:
+    print(f'scripting failed: {e}')
+
+
scripting ok
+
+
+
+

source

+
+
+

create_pool_head

+
+
 create_pool_head (n_in, c_out, seq_len=None, concat_pool=False,
+                   fc_dropout=0.0, bn=False, y_range=None, **kwargs)
+
+
+
bs = 16
+nf = 12
+c_out = 2
+seq_len = 20
+t = torch.rand(bs, nf, seq_len)
+test_eq(create_pool_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
+test_eq(create_pool_head(nf, c_out, seq_len, concat_pool=True, fc_dropout=0.5)(t).shape, (bs, c_out))
+create_pool_head(nf, c_out, seq_len, concat_pool=True, bn=True, fc_dropout=.5)
+
+
Sequential(
+  (0): GACP1d(
+    (gacp): AdaptiveConcatPool1d(
+      (ap): AdaptiveAvgPool1d(output_size=1)
+      (mp): AdaptiveMaxPool1d(output_size=1)
+    )
+    (flatten): Reshape(bs)
+  )
+  (1): LinBnDrop(
+    (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (1): Dropout(p=0.5, inplace=False)
+    (2): Linear(in_features=24, out_features=2, bias=False)
+  )
+)
+
+
+
+

source

+
+
+

max_pool_head

+
+
 max_pool_head (n_in, c_out, seq_len, fc_dropout=0.0, bn=False,
+                y_range=None, **kwargs)
+
+
+
bs = 16
+nf = 12
+c_out = 2
+seq_len = 20
+t = torch.rand(bs, nf, seq_len)
+test_eq(max_pool_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
+
+
+

source

+
+
+

create_pool_plus_head

+
+
 create_pool_plus_head (*args, lin_ftrs=None, fc_dropout=0.0,
+                        concat_pool=True, bn_final=False, lin_first=False,
+                        y_range=None)
+
+
+
bs = 16
+nf = 12
+c_out = 2
+seq_len = 20
+t = torch.rand(bs, nf, seq_len)
+test_eq(create_pool_plus_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
+test_eq(create_pool_plus_head(nf, c_out, concat_pool=True, fc_dropout=0.5)(t).shape, (bs, c_out))
+create_pool_plus_head(nf, c_out, seq_len, fc_dropout=0.5)
+
+
Sequential(
+  (0): AdaptiveConcatPool1d(
+    (ap): AdaptiveAvgPool1d(output_size=1)
+    (mp): AdaptiveMaxPool1d(output_size=1)
+  )
+  (1): Reshape(bs)
+  (2): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (3): Dropout(p=0.25, inplace=False)
+  (4): Linear(in_features=24, out_features=512, bias=False)
+  (5): ReLU(inplace=True)
+  (6): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (7): Dropout(p=0.5, inplace=False)
+  (8): Linear(in_features=512, out_features=2, bias=False)
+)
+
+
+
+

source

+
+
+

create_conv_head

+
+
 create_conv_head (*args, adaptive_size=None, y_range=None)
+
+
+
bs = 16
+nf = 12
+c_out = 2
+seq_len = 20
+t = torch.rand(bs, nf, seq_len)
+test_eq(create_conv_head(nf, c_out, seq_len)(t).shape, (bs, c_out))
+test_eq(create_conv_head(nf, c_out, adaptive_size=50)(t).shape, (bs, c_out))
+create_conv_head(nf, c_out, 50)
+
+
Sequential(
+  (0): ConvBlock(
+    (0): Conv1d(12, 6, kernel_size=(1,), stride=(1,), bias=False)
+    (1): BatchNorm1d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (1): ConvBlock(
+    (0): Conv1d(6, 3, kernel_size=(1,), stride=(1,), bias=False)
+    (1): BatchNorm1d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (2): ConvBlock(
+    (0): Conv1d(3, 2, kernel_size=(1,), stride=(1,), bias=False)
+    (1): BatchNorm1d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (3): GAP1d(
+    (gap): AdaptiveAvgPool1d(output_size=1)
+    (flatten): Reshape(bs)
+  )
+)
+
+
+
+

source

+
+
+

create_mlp_head

+
+
 create_mlp_head (nf, c_out, seq_len=None, flatten=True, fc_dropout=0.0,
+                  bn=False, lin_first=False, y_range=None)
+
+
+
bs = 16
+nf = 12
+c_out = 2
+seq_len = 20
+t = torch.rand(bs, nf, seq_len)
+test_eq(create_mlp_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
+t = torch.rand(bs, nf, seq_len)
+create_mlp_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
+
+
Sequential(
+  (0): Reshape(bs)
+  (1): LinBnDrop(
+    (0): BatchNorm1d(240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (1): Dropout(p=0.5, inplace=False)
+    (2): Linear(in_features=240, out_features=2, bias=False)
+  )
+)
+
+
+
+

source

+
+
+

create_fc_head

+
+
 create_fc_head (nf, c_out, seq_len=None, flatten=True, lin_ftrs=None,
+                 y_range=None, fc_dropout=0.0, bn=False, bn_final=False,
+                 act=ReLU(inplace=True))
+
+
+
bs = 16
+nf = 12
+c_out = 2
+seq_len = 20
+t = torch.rand(bs, nf, seq_len)
+test_eq(create_fc_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
+create_mlp_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
+
+
Sequential(
+  (0): Reshape(bs)
+  (1): LinBnDrop(
+    (0): BatchNorm1d(240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (1): Dropout(p=0.5, inplace=False)
+    (2): Linear(in_features=240, out_features=2, bias=False)
+  )
+)
+
+
+
+

source

+
+
+

create_rnn_head

+
+
 create_rnn_head (*args, fc_dropout=0.0, bn=False, y_range=None)
+
+
+
bs = 16
+nf = 12
+c_out = 2
+seq_len = 20
+t = torch.rand(bs, nf, seq_len)
+test_eq(create_rnn_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
+create_rnn_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
+
+
Sequential(
+  (0): LastStep()
+  (1): LinBnDrop(
+    (0): BatchNorm1d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (1): Dropout(p=0.5, inplace=False)
+    (2): Linear(in_features=12, out_features=2, bias=False)
+  )
+)
+
+
+
+

source

+
+
+

imputation_head

+
+
 imputation_head (c_in, c_out, seq_len=None, ks=1, y_range=None,
+                  fc_dropout=0.0)
+
+
+
bs = 16
+nf = 12
+ni = 2
+seq_len = 20
+t = torch.rand(bs, nf, seq_len)
+head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=None, fc_dropout=0.)
+test_eq(head(t).shape, (bs, ni, seq_len))
+head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=(.3,.7), fc_dropout=0.)
+test_ge(head(t).min(), .3)
+test_le(head(t).max(), .7)
+y_range = (tensor([0.1000, 0.1000, 0.1000, 0.1000, 0.2000, 0.2000, 0.2000, 0.2000, 0.3000,
+                   0.3000, 0.3000, 0.3000]),
+           tensor([0.6000, 0.6000, 0.6000, 0.6000, 0.7000, 0.7000, 0.7000, 0.7000, 0.8000,
+                   0.8000, 0.8000, 0.8000]))
+test_ge(head(t).min(), .1)
+test_le(head(t).max(), .9)
+head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=y_range, fc_dropout=0.)
+head
+
+
Sequential(
+  (0): Dropout(p=0.0, inplace=False)
+  (1): Conv1d(12, 2, kernel_size=(1,), stride=(1,))
+  (2): fastai.layers.SigmoidRange(low=tensor([0.1000, 0.1000, 0.1000, 0.1000, 0.2000, 0.2000, 0.2000, 0.2000, 0.3000,
+          0.3000, 0.3000, 0.3000]), high=tensor([0.6000, 0.6000, 0.6000, 0.6000, 0.7000, 0.7000, 0.7000, 0.7000, 0.8000,
+          0.8000, 0.8000, 0.8000]))
+)
+
+
+
+

source

+
+
+

create_conv_lin_nd_head

+
+
 create_conv_lin_nd_head (n_in, n_out, seq_len, d, conv_first=True,
+                          conv_bn=False, lin_bn=False, fc_dropout=0.0,
+                          **kwargs)
+
+

Module to create a nd output head

+
+
bs = 16
+nf = 32
+c = 5
+seq_len = 10
+d = 2
+targ = torch.randint(0, c, (bs,d))
+t = torch.randn(bs, nf, seq_len)
+head = conv_lin_nd_head(nf, c, seq_len, d, conv_first=True, fc_dropout=.5)
+inp = head(t)
+test_eq(inp.shape, (bs, d, c))
+loss = CrossEntropyLossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(1.7074, grad_fn=<AliasBackward0>),
+ create_conv_lin_nd_head(
+   (0): Conv1d(32, 5, kernel_size=(1,), stride=(1,))
+   (1): Dropout(p=0.5, inplace=False)
+   (2): Linear(in_features=10, out_features=2, bias=True)
+   (3): Transpose(-1, -2)
+   (4): Reshape(bs, 2, 5)
+ ))
+
+
+
+
bs = 16
+nf = 32
+c = 5
+seq_len = 10
+d = [2, 8]
+targ = torch.randint(0, c, [bs]+d)
+t = torch.randn(bs, nf, seq_len)
+head = conv_lin_nd_head(nf, c, seq_len, d, conv_first=False, fc_dropout=.5)
+inp = head(t)
+test_eq(inp.shape, [bs]+d+[c])
+loss = CrossEntropyLossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(1.6561, grad_fn=<AliasBackward0>),
+ create_conv_lin_nd_head(
+   (0): Dropout(p=0.5, inplace=False)
+   (1): Linear(in_features=10, out_features=16, bias=True)
+   (2): Conv1d(32, 5, kernel_size=(1,), stride=(1,))
+   (3): Transpose(-1, -2)
+   (4): Reshape(bs, 2, 8, 5)
+ ))
+
+
+
+
bs = 16
+nf = 32
+c = 1
+seq_len = 10
+d = 2
+targ = torch.rand(bs, d)
+t = torch.randn(bs, nf, seq_len)
+head = conv_lin_nd_head(nf, c, seq_len, d, conv_first=False, fc_dropout=.5)
+inp = head(t)
+test_eq(inp.shape, (bs, d))
+loss = L1LossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(0.6017, grad_fn=<AliasBackward0>),
+ create_conv_lin_nd_head(
+   (0): Dropout(p=0.5, inplace=False)
+   (1): Linear(in_features=10, out_features=2, bias=True)
+   (2): Conv1d(32, 1, kernel_size=(1,), stride=(1,))
+   (3): Transpose(-1, -2)
+   (4): Reshape(bs, 2)
+ ))
+
+
+
+
bs = 16
+nf = 32
+c = 1
+seq_len = 10
+d = [2,3]
+targ = torch.rand(bs, *d)
+t = torch.randn(bs, nf, seq_len)
+head = conv_lin_nd_head(nf, c, seq_len, d, conv_first=False, fc_dropout=.5)
+inp = head(t)
+test_eq(inp.shape, [bs]+d)
+loss = L1LossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(0.5439, grad_fn=<AliasBackward0>),
+ create_conv_lin_nd_head(
+   (0): Dropout(p=0.5, inplace=False)
+   (1): Linear(in_features=10, out_features=6, bias=True)
+   (2): Conv1d(32, 1, kernel_size=(1,), stride=(1,))
+   (3): Transpose(-1, -2)
+   (4): Reshape(bs, 2, 3)
+ ))
+
+
+
+

source

+
+
+

lin_nd_head

+
+
 lin_nd_head (n_in, n_out, seq_len=None, d=None, flatten=False,
+              use_bn=False, fc_dropout=0.0)
+
+

Module to create a nd output head with linear layers

+
+
bs = 16
+nf = 32
+seq_len = 50
+x = torch.normal(0, 1, (bs, nf, seq_len))
+
+for use_bn in [False, True]:
+    for fc_dropout in [0, 0.2]:
+        for flatten in [False, True]:
+            for c in [1, 3]:
+                for d in [None, (50,), (50,10), (30,5), (50,2,3), (30,2,3)]:
+                    for q_len in [1, seq_len]:
+                        head = lin_nd_head(nf, c, q_len, d, flatten=flatten, use_bn=use_bn, fc_dropout=fc_dropout)
+                        test_eq(head(x).shape, (bs, ) + (d if d is not None else ()) + ((c,) if c != 1 else ()))
+
+
+
bs = 16
+nf = 32
+c = 5
+seq_len = 10
+d = 2
+targ = torch.randint(0, c, (bs,d))
+t = torch.randn(bs, nf, seq_len)
+head = lin_nd_head(nf, c, seq_len, d, fc_dropout=.5)
+inp = head(t)
+test_eq(inp.shape, (bs, d, c))
+loss = CrossEntropyLossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(1.8360, grad_fn=<AliasBackward0>),
+ lin_nd_head(
+   (0): Dropout(p=0.5, inplace=False)
+   (1): Reshape(bs)
+   (2): Linear(in_features=320, out_features=10, bias=True)
+   (3): Reshape(bs, 2, 5)
+ ))
+
+
+
+
bs = 16
+nf = 32
+c = 5
+seq_len = 10
+d = [2, 8]
+targ = torch.randint(0, c, [bs]+d)
+t = torch.randn(bs, nf, seq_len)
+head = lin_nd_head(nf, c, seq_len, d, fc_dropout=.5)
+inp = head(t)
+test_eq(inp.shape, [bs]+d+[c])
+loss = CrossEntropyLossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(1.7557, grad_fn=<AliasBackward0>),
+ lin_nd_head(
+   (0): Dropout(p=0.5, inplace=False)
+   (1): Reshape(bs)
+   (2): Linear(in_features=320, out_features=80, bias=True)
+   (3): Reshape(bs, 2, 8, 5)
+ ))
+
+
+
+
bs = 16
+nf = 32
+c = 1
+seq_len = 10
+d = 2
+targ = torch.rand(bs, d)
+t = torch.randn(bs, nf, seq_len)
+head = lin_nd_head(nf, c, seq_len, d, fc_dropout=.5)
+inp = head(t)
+test_eq(inp.shape, (bs, d))
+loss = L1LossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(0.5978, grad_fn=<AliasBackward0>),
+ lin_nd_head(
+   (0): Dropout(p=0.5, inplace=False)
+   (1): Reshape(bs)
+   (2): Linear(in_features=320, out_features=2, bias=True)
+   (3): Reshape(bs, 2)
+ ))
+
+
+
+
bs = 16
+nf = 32
+c = 1
+seq_len = 10
+d = [2,3]
+targ = torch.rand(bs, *d)
+t = torch.randn(bs, nf, seq_len)
+head = lin_nd_head(nf, c, seq_len, d, fc_dropout=.5)
+inp = head(t)
+test_eq(inp.shape, [bs]+d)
+loss = L1LossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(0.8286, grad_fn=<AliasBackward0>),
+ lin_nd_head(
+   (0): Dropout(p=0.5, inplace=False)
+   (1): Reshape(bs)
+   (2): Linear(in_features=320, out_features=6, bias=True)
+   (3): Reshape(bs, 2, 3)
+ ))
+
+
+
+

source

+
+
+

rocket_nd_head

+
+
 rocket_nd_head (n_in, n_out, seq_len=None, d=None, use_bn=False,
+                 fc_dropout=0.0, zero_init=True)
+
+

Module to create an nd output head with linear layers for the rocket family of models

+
+
bs = 16
+nf = 99
+seq_len = 1
+x = torch.normal(0, 1, (bs, nf, seq_len))
+
+for use_bn in [False, True]:
+    for fc_dropout in [0, 0.2]:
+        for c in [1, 3]:
+            for d in [None, (50,), (50,10), (30,5), (50,2,3), (30,2,3)]:
+                head = rocket_nd_head(nf, c, 1, d, use_bn=use_bn, fc_dropout=fc_dropout)
+                test_eq(head(x).shape, (bs, ) + (d if d is not None else ()) + ((c,) if c != 1 else ()))
+
+
+

source

+
+
+

xresnet1d_nd_head

+
+
 xresnet1d_nd_head (n_in, n_out, seq_len=None, d=None, use_bn=False,
+                    fc_dropout=0.0, zero_init=True)
+
+

Module to create an nd output head with linear layers for the xresnet family of models

+
+
bs = 16
+nf = 99
+seq_len = 2
+x = torch.normal(0, 1, (bs, nf, seq_len))
+
+for use_bn in [False, True]:
+    for fc_dropout in [0, 0.2]:
+        for c in [1, 3]:
+            for d in [None, (50,), (50,10), (30,5), (50,2,3), (30,2,3)]:
+                head = xresnet1d_nd_head(nf, c, 1, d, use_bn=use_bn, fc_dropout=fc_dropout)
+                test_eq(head(x).shape, (bs, ) + (d if d is not None else ()) + ((c,) if c != 1 else ()))
+
+
+

source

+
+
+

create_conv_3d_head

+
+
 create_conv_3d_head (n_in, n_out, seq_len, d, use_bn=False, **kwargs)
+
+

Module to create an nd output head with a convolutional layer

+
+
bs = 16
+nf = 32
+c = 5
+seq_len = 10
+d = 10
+targ = torch.randint(0, c, (bs,d))
+t = torch.randn(bs, nf, seq_len)
+head = conv_3d_head(nf, c, seq_len, d)
+inp = head(t)
+test_eq(inp.shape, (bs, d, c))
+loss = CrossEntropyLossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(1.7321, grad_fn=<AliasBackward0>),
+ create_conv_3d_head(
+   (0): ConvBlock(
+     (0): Conv1d(32, 5, kernel_size=(1,), stride=(1,))
+   )
+   (1): Transpose(-1, -2)
+ ))
+
+
+
+
bs = 16
+nf = 32
+c = 1
+seq_len = 10
+d = 10
+targ = torch.rand(bs, d)
+t = torch.randn(bs, nf, seq_len)
+head = conv_3d_head(nf, c, seq_len, d)
+inp = head(t)
+test_eq(inp.shape, (bs, d))
+loss = L1LossFlat()(inp, targ)
+loss, head
+
+
(TensorBase(0.5833, grad_fn=<AliasBackward0>),
+ create_conv_3d_head(
+   (0): ConvBlock(
+     (0): Conv1d(32, 1, kernel_size=(1,), stride=(1,))
+   )
+   (1): Transpose(-1, -2)
+   (2): Squeeze(dim=-1)
+ ))
+
+
+
+

source

+
+
+

universal_pool_head

+
+
 universal_pool_head (n_in, c_out, seq_len, mult=2, pool_n_layers=2,
+                      pool_ln=True, pool_dropout=0.5, pool_act=ReLU(),
+                      zero_init=True, bn=True, fc_dropout=0.0)
+
+
+
bs, c_in, seq_len = 16, 128, 50
+c_out = 14
+t = torch.rand(bs, c_in, seq_len)
+uph = universal_pool_head(c_in, c_out, seq_len)
+test_eq(uph(t).shape, (bs, c_out))
+uph = universal_pool_head(c_in, c_out, seq_len, 2)
+test_eq(uph(t).shape, (bs, c_out))
+
+
+
bs, c_in, seq_len = 16, 128, 50
+c_out = 14
+d = 5
+t = torch.rand(bs, c_in, seq_len)
+for head in heads: 
+    print(head.__name__)
+    if head.__name__ == "create_conv_3d_head":
+        h = head(c_in, c_out, seq_len, seq_len)
+        test_eq(h(t).shape, (bs, seq_len, c_out))
+    elif 'nd' in head.__name__: 
+        h = head(c_in, c_out, seq_len, d)
+        test_eq(h(t).shape, (bs, d, c_out))
+    else: 
+        h = head(c_in, c_out, seq_len)
+        test_eq(h(t).shape, (bs, c_out))
+
+
create_mlp_head
+create_fc_head
+average_pool_head
+max_pool_head
+concat_pool_head
+create_pool_plus_head
+create_conv_head
+create_rnn_head
+create_conv_lin_nd_head
+lin_nd_head
+create_conv_3d_head
+attentional_pool_head
+universal_pool_head
+gwa_pool_head
+
+
+
+

source

+
+
+

SqueezeExciteBlock

+
+
 SqueezeExciteBlock (ni, reduction=16)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 2
+ni = 32
+sl = 4
+t = torch.rand(bs, ni, sl)
+test_eq(SqueezeExciteBlock(ni)(t).shape, (bs, ni, sl))
+
+
+

source

+
+
+

GaussianNoise

+
+
 GaussianNoise (sigma=0.1, is_relative_detach=True)
+
+

Gaussian noise regularizer.

+

Args:
- sigma (float, optional): relative standard deviation used to generate the noise. Relative means it is multiplied by the magnitude of the value you are adding the noise to, so sigma can stay the same regardless of the scale of the vector.
- is_relative_detach (bool, optional): whether to detach the variable before computing the scale of the noise. If False, the scale of the noise is not treated as a constant but as something to optimize, which biases the network toward generating vectors with smaller values.

+
+
t = torch.ones(2,3,4)
+test_ne(GaussianNoise()(t), t)
+test_eq(GaussianNoise()(t).shape, t.shape)
+t = torch.ones(2,3)
+test_ne(GaussianNoise()(t), t)
+test_eq(GaussianNoise()(t).shape, t.shape)
+t = torch.ones(2)
+test_ne(GaussianNoise()(t), t)
+test_eq(GaussianNoise()(t).shape, t.shape)
+
+
+
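To make the "relative" sigma concrete, here is a small additional sketch (an assumption-based illustration, not from the original notebook): scaling the input by 100 should scale the added noise by roughly the same factor, since the noise scale is computed from the input magnitude.

# Sketch: the noise scale follows the input magnitude (module is in training mode by default).
t = torch.ones(1000)
noisy_small = GaussianNoise(sigma=0.1)(t)
noisy_large = GaussianNoise(sigma=0.1)(t * 100)
print((noisy_small - t).std())        # ~0.1
print((noisy_large - t * 100).std())  # ~10, i.e. roughly 100x larger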

source

+
+
+

TokenLayer

+
+
 TokenLayer (token=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
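The rendered docstring above doesn't describe TokenLayer's behavior. A minimal usage sketch, under the assumption that it reduces the last (sequence) dimension of a (bs, d_model, seq_len) tensor, e.g. by selecting the class/cls token at the first position when token=True:

# Assumption: TokenLayer collapses the last dimension, returning a (bs, d_model) tensor.
t = torch.rand(16, 128, 50)
test_eq(TokenLayer()(t).shape, (16, 128))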

source

+
+
+

PositionwiseFeedForward

+
+
 PositionwiseFeedForward (dim, dropout=0.0, act='reglu', mlp_ratio=1)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
t = torch.randn(2,3,10)
+m = PositionwiseFeedForward(10, dropout=0., act='reglu', mlp_ratio=1)
+test_eq(m(t).shape, t.shape)
+m = PositionwiseFeedForward(10, dropout=0., act='smelu', mlp_ratio=1)
+test_eq(m(t).shape, t.shape)
+
+
+

source

+
+
+

ScaledDotProductAttention

+
+
 ScaledDotProductAttention (d_model, n_heads, attn_dropout=0.0,
+                            res_attention=False, lsa=False)
+
+

Scaled Dot-Product Attention module (Attention Is All You Need, Vaswani et al., 2017) with optional residual attention from the previous layer (RealFormer: Transformer Likes Residual Attention, He et al., 2020) and locality self-attention (Vision Transformer for Small-Size Datasets, Lee et al., 2021)

+
+
B = 16
+C = 10
+M = 1500 # seq_len
+
+n_heads = 1
+D = 128 # model dimension
+N = 512 # max_seq_len - latent's index dimension
+d_k = D // n_heads
+
+xb = torch.randn(B, C, M)
+xb = (xb - xb.mean()) / xb.std()
+
+# Attention
+# input (Q)
+lin = nn.Linear(M, N, bias=False)
+Q = lin(xb).transpose(1,2)
+test_eq(Q.shape, (B, N, C))
+
+# q
+to_q = nn.Linear(C, D, bias=False)
+q = to_q(Q)
+q = nn.LayerNorm(D)(q)
+
+# k, v
+context = xb.transpose(1,2)
+to_kv = nn.Linear(C, D * 2, bias=False)
+k, v = to_kv(context).chunk(2, dim = -1)
+k = k.transpose(-1, -2)
+k = nn.LayerNorm(M)(k)
+v = nn.LayerNorm(D)(v)
+
+test_eq(q.shape, (B, N, D))
+test_eq(k.shape, (B, D, M))
+test_eq(v.shape, (B, M, D))
+
+output, attn, scores = ScaledDotProductAttention(D, n_heads, res_attention=True)(q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1))
+test_eq(output.shape, (B, 1, N, D))
+test_eq(attn.shape, (B, 1, N, M))
+test_eq(scores.shape, (B, 1, N, M))
+scores.mean(), scores.std()
+
+
(tensor(1.3535e-10, grad_fn=<MeanBackward0>),
+ tensor(1.0555, grad_fn=<StdBackward0>))
+
+
+
+

source

+
+
+

MultiheadAttention

+
+
 MultiheadAttention (d_model, n_heads, d_k=None, d_v=None,
+                     res_attention=False, attn_dropout=0.0,
+                     proj_dropout=0.0, qkv_bias=True, lsa=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
q = torch.rand([16, 3, 50, 8]) 
+k = torch.rand([16, 3, 50, 8]).transpose(-1, -2)
+v = torch.rand([16, 3, 50, 6])
+attn_mask = torch.triu(torch.ones(50, 50)) # shape: q_len x q_len
+key_padding_mask = torch.zeros(16, 50)
+key_padding_mask[[1, 3, 6, 15], -10:] = 1
+key_padding_mask = key_padding_mask.bool()
+print('attn_mask', attn_mask.shape, 'key_padding_mask', key_padding_mask.shape)
+output, attn = ScaledDotProductAttention(24, 3, attn_dropout=.1)(q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask)
+output.shape, attn.shape
+
+
attn_mask torch.Size([50, 50]) key_padding_mask torch.Size([16, 50])
+
+
+
(torch.Size([16, 3, 50, 6]), torch.Size([16, 3, 50, 50]))
+
+
+
+
t = torch.rand(16, 50, 128)
+output, attn = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
+output.shape, attn.shape
+
+
(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))
+
+
+

Test multi-head attention with locality self-attention (lsa)

+
+
+# lsa (locality self-attention)
+t = torch.rand(16, 50, 128)
+attn_mask = torch.eye(50).reshape(1, 1, 50, 50).bool()
+output, attn = MultiheadAttention(d_model=128, n_heads=8, lsa=True)(t, t, t, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
+output.shape, attn.shape
+
+
(torch.Size([16, 50, 128]), torch.Size([16, 8, 50, 50]))
+
+
+
+
t = torch.rand(16, 50, 128)
+att_mask = (torch.rand((50, 50)) > .85).float()
+att_mask[att_mask == 1] = -np.inf
+
+mha = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)
+output, attn = mha(t, t, t, attn_mask=att_mask)
+test_eq(torch.isnan(output).sum().item(), 0)
+test_eq(torch.isnan(attn).sum().item(), 0)
+loss = output[:2, :].sum()
+test_eq(torch.isnan(loss).sum().item(), 0)
+loss.backward()
+for n, p in mha.named_parameters(): 
+    if p.grad is not None:
+        test_eq(torch.isnan(p.grad).sum().item(), 0)
+
+
+
t = torch.rand(16, 50, 128)
+attn_mask = (torch.rand((50, 50)) > .85)
+
+# True values will be masked
+mha = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)
+output, attn = mha(t, t, t, attn_mask=attn_mask)
+test_eq(torch.isnan(output).sum().item(), 0)
+test_eq(torch.isnan(attn).sum().item(), 0)
+loss = output[:2, :].sum()
+test_eq(torch.isnan(loss).sum().item(), 0)
+loss.backward()
+for n, p in mha.named_parameters(): 
+    if p.grad is not None:
+        test_eq(torch.isnan(p.grad).sum().item(), 0)
+
+
+

source

+
+
+

MultiConv1d

+
+
 MultiConv1d (ni, nf=None, kss=[1, 3, 5, 7], keep_original=False,
+              separable=False, dim=1, **kwargs)
+
+

Module that applies multiple convolutions with different kernel sizes

+
+
t = torch.rand(16, 6, 37)
+test_eq(MultiConv1d(6, None, kss=[1,3,5], keep_original=True)(t).shape, [16, 24, 37])
+test_eq(MultiConv1d(6, 36, kss=[1,3,5], keep_original=False)(t).shape, [16, 36, 37])
+test_eq(MultiConv1d(6, None, kss=[1,3,5], keep_original=True, dim=-1)(t).shape, [16, 6, 37*4])
+test_eq(MultiConv1d(6, 60, kss=[1,3,5], keep_original=True)(t).shape, [16, 60, 37])
+test_eq(MultiConv1d(6, 60, kss=[1,3,5], separable=True)(t).shape, [16, 60, 37])
+
+
+

source

+
+
+

LSTMOutput

+
+
 LSTMOutput ()
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
t = ([1], [2], [3])
+test_eq(LSTMOutput()(t), [1])
+
+
+

source

+
+
+

emb_sz_rule

+
+
 emb_sz_rule (n_cat)
+
+

Rule of thumb to pick embedding size corresponding to n_cat (original from fastai)

+
+
test_eq(emb_sz_rule(7), 5)
+
+
+
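The value above is consistent with the fastai rule of thumb min(600, round(1.6 * n_cat**0.56)); treating that formula as an assumption, a worked check:

# Assumed formula: min(600, round(1.6 * n_cat**0.56)). For n_cat=7: 1.6 * 7**0.56 ≈ 4.76 -> 5.
test_eq(emb_sz_rule(7), min(600, round(1.6 * 7 ** 0.56)))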

source

+
+
+

TSEmbedding

+
+
 TSEmbedding (ni, nf, std=0.01, padding_idx=None)
+
+

Embedding layer with truncated normal initialization adapted from fastai

+
+
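No usage example is shown for TSEmbedding; a minimal sketch, assuming it behaves like nn.Embedding(ni, nf) with truncated-normal initialization of the given std:

# Assumption: TSEmbedding maps integer ids in [0, ni) to nf-dimensional vectors.
emb = TSEmbedding(10, 3)
t = torch.randint(0, 10, (16, 4))
test_eq(emb(t).shape, (16, 4, 3))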

source

+
+
+

MultiEmbedding

+
+
 MultiEmbedding (c_in, n_cat_embeds, cat_embed_dims=None, cat_pos=None,
+                 std=0.01, cat_padding_idxs=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
a = alphabet[np.random.randint(0,3,40)]
+b = ALPHABET[np.random.randint(6,10,40)]
+c = np.random.rand(40).reshape(4,1,10)
+map_a = {k:v for v,k in enumerate(np.unique(a))}
+map_b = {k:v for v,k in enumerate(np.unique(b))}
+n_embeds = [len(m.keys()) for m in [map_a, map_b]]
+szs = [emb_sz_rule(n) for n in n_embeds]
+a = np.asarray(a.map(map_a)).reshape(4,1,10)
+b = np.asarray(b.map(map_b)).reshape(4,1,10)
+inp = torch.from_numpy(np.concatenate((c,a,b), 1)).float()
+memb = MultiEmbedding(3, n_embeds, cat_pos=[1,2])
+# registered buffers are part of the state_dict() but not module.parameters()
+assert all([(k in memb.state_dict().keys()) for k in ['cat_pos', 'cont_pos']])
+embeddings = memb(inp)
+print(n_embeds, szs, inp.shape, embeddings.shape)
+test_eq(embeddings.shape, (inp.shape[0],sum(szs)+1,inp.shape[-1]))
+
+
[3, 4] [3, 3] torch.Size([4, 3, 10]) torch.Size([4, 7, 10])
+
+
+
+
me = MultiEmbedding(3, 4, cat_pos=2)
+test_eq(me.cat_embed[0].weight.shape, (4,3))
+test_eq(me.cat_pos.cpu().item(), 2)
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.minirocket.html b/models.minirocket.html new file mode 100644 index 000000000..ba1c771b8 --- /dev/null +++ b/models.minirocket.html @@ -0,0 +1,1433 @@ + + + + + + + + + +tsai - MINIROCKET + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

MINIROCKET

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

A Very Fast (Almost) Deterministic Transform for Time Series Classification.

+
+
+

source

+
+

MiniRocketClassifier

+
+
 MiniRocketClassifier (num_features=10000, max_dilations_per_kernel=32,
+                       random_state=None, alphas=array([1.e-03, 1.e-02,
+                       1.e-01, 1.e+00, 1.e+01, 1.e+02, 1.e+03]),
+                       normalize_features=True, memory=None,
+                       verbose=False, scoring=None, class_weight=None,
+                       **kwargs)
+
+

Time series classification using MINIROCKET features and a linear classifier

+
+

source

+
+
+

load_minirocket

+
+
 load_minirocket (fname, path='./models')
+
+
+

source

+
+
+

MiniRocketRegressor

+
+
 MiniRocketRegressor (num_features=10000, max_dilations_per_kernel=32,
+                      random_state=None, alphas=array([1.e-03, 1.e-02,
+                      1.e-01, 1.e+00, 1.e+01, 1.e+02, 1.e+03]),
+                      normalize_features=True, memory=None, verbose=False,
+                      scoring=None, **kwargs)
+
+

Time series regression using MINIROCKET features and a linear regressor

+
+

source

+
+
+

load_minirocket

+
+
 load_minirocket (fname, path='./models')
+
+
+

source

+
+
+

MiniRocketVotingClassifier

+
+
 MiniRocketVotingClassifier (n_estimators=5, weights=None, n_jobs=-1,
+                             num_features=10000,
+                             max_dilations_per_kernel=32,
+                             random_state=None, alphas=array([1.e-03,
+                             1.e-02, 1.e-01, 1.e+00, 1.e+01, 1.e+02,
+                             1.e+03]), normalize_features=True,
+                             memory=None, verbose=False, scoring=None,
+                             class_weight=None, **kwargs)
+
+

Time series classification ensemble using MINIROCKET features, a linear classifier and majority voting

+
+

source

+
+
+

get_minirocket_preds

+
+
 get_minirocket_preds (X, fname, path='./models', model=None)
+
+
+
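get_minirocket_preds has no usage example on this page. A minimal sketch, assuming it loads the model saved under fname (unless a fitted model is passed) and returns its predictions for X; it reuses X_test and the 'MiniRocketClassifier' file saved in the univariate classification example further below:

# Assumption-based sketch: relies on cls.save('MiniRocketClassifier') and X_test from the
# 'OliveOil' example below having been run first.
preds = get_minirocket_preds(X_test, 'MiniRocketClassifier', path='./models')
print(preds[:5])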

source

+
+
+

MiniRocketVotingRegressor

+
+
 MiniRocketVotingRegressor (n_estimators=5, weights=None, n_jobs=-1,
+                            num_features=10000,
+                            max_dilations_per_kernel=32,
+                            random_state=None, alphas=array([1.e-03,
+                            1.e-02, 1.e-01, 1.e+00, 1.e+01, 1.e+02,
+                            1.e+03]), normalize_features=True,
+                            memory=None, verbose=False, scoring=None,
+                            **kwargs)
+
+

Time series regression ensemble using MINIROCKET features, a linear regressor and a voting regressor

+
+
# Univariate classification with sklearn-type API
+dsid = 'OliveOil'
+fname = 'MiniRocketClassifier'
+X_train, y_train, X_test, y_test = get_UCR_data(dsid)
+cls = MiniRocketClassifier()
+cls.fit(X_train, y_train)
+cls.save(fname)
+pred = cls.score(X_test, y_test)
+del cls
+cls = load_minirocket(fname)
+test_eq(cls.score(X_test, y_test), pred)
+
+
OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+
+
+
+
# Multivariate classification with sklearn-type API
+dsid = 'NATOPS'
+X_train, y_train, X_test, y_test = get_UCR_data(dsid)
+cls = MiniRocketClassifier()
+cls.fit(X_train, y_train)
+cls.score(X_test, y_test)
+
+
0.9277777777777778
+
+
+
+
# Multivariate classification with sklearn-type API
+dsid = 'NATOPS'
+X_train, y_train, X_test, y_test = get_UCR_data(dsid)
+cls = MiniRocketVotingClassifier(5)
+cls.fit(X_train, y_train)
+cls.score(X_test, y_test)
+
+
OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+
+
+
0.9166666666666666
+
+
+
+
from sklearn.metrics import mean_squared_error
+
+
+
# Univariate regression with sklearn-type API
+dsid = 'Covid3Month'
+fname = 'MiniRocketRegressor'
+X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid)
+if X_train is not None:
+    rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
+    reg = MiniRocketRegressor(scoring=rmse_scorer)
+    reg.fit(X_train, y_train)
+    reg.save(fname)
+    del reg
+    reg = load_minirocket(fname)
+    y_pred = reg.predict(X_test)
+    print(mean_squared_error(y_test, y_pred, squared=False))
+
+
0.04099244037606886
+
+
+
+
# Multivariate regression with sklearn-type API
+dsid = 'AppliancesEnergy'
+X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid)
+if X_train is not None:
+    rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
+    reg = MiniRocketRegressor(scoring=rmse_scorer)
+    reg.fit(X_train, y_train)
+    reg.save(fname)
+    del reg
+    reg = load_minirocket(fname)
+    y_pred = reg.predict(X_test)
+    print(mean_squared_error(y_test, y_pred, squared=False))
+
+
2.2938026879322577
+
+
+
+
# Multivariate regression ensemble with sklearn-type API
+if X_train is not None:
+    reg = MiniRocketVotingRegressor(5, scoring=rmse_scorer)
+    reg.fit(X_train, y_train)
+    y_pred = reg.predict(X_test)
+    print(mean_squared_error(y_test, y_pred, squared=False))
+
+
OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+
+
+
2.286295546348893
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.minirocket_pytorch.html b/models.minirocket_pytorch.html new file mode 100644 index 000000000..1d4096988 --- /dev/null +++ b/models.minirocket_pytorch.html @@ -0,0 +1,1422 @@ + + + + + + + + + +tsai - MINIROCKET Pytorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

MINIROCKET Pytorch

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

A Very Fast (Almost) Deterministic Transform for Time Series Classification.

+
+

This is a Pytorch implementation of MiniRocket developed by Malcolm McLean and Ignacio Oguiza based on:

+

Dempster, A., Schmidt, D. F., & Webb, G. I. (2020). MINIROCKET: A Very Fast (Almost) Deterministic Transform for Time Series Classification. arXiv preprint arXiv:2012.08791.

+

Original paper: https://arxiv.org/abs/2012.08791

+

Original code: https://github.com/angus924/minirocket

+
+

source

+
+

MiniRocketFeatures

+
+
 MiniRocketFeatures (c_in, seq_len, num_features=10000,
+                     max_dilations_per_kernel=32, random_state=None)
+
+

This is a Pytorch implementation of MiniRocket developed by Malcolm McLean and Ignacio Oguiza

+

MiniRocket paper citation: @article{dempster_etal_2020, author = {Dempster, Angus and Schmidt, Daniel F and Webb, Geoffrey I}, title = {{MINIROCKET}: A Very Fast (Almost) Deterministic Transform for Time Series Classification}, year = {2020}, journal = {arXiv:2012.08791} } Original paper: https://arxiv.org/abs/2012.08791 Original code: https://github.com/angus924/minirocket

+
+

source

+
+
+

get_minirocket_features

+
+
 get_minirocket_features (o, model, chunksize=1024, use_cuda=None,
+                          to_np=True)
+
+

Function used to split a large dataset into chunks, avoiding OOM error.

+
+

source

+
+
+

MiniRocketHead

+
+
 MiniRocketHead (c_in, c_out, seq_len=1, bn=True, fc_dropout=0.0)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

MiniRocket

+
+
 MiniRocket (c_in, c_out, seq_len, num_features=10000,
+             max_dilations_per_kernel=32, random_state=None, bn=True,
+             fc_dropout=0)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
from tsai.imports import default_device
+from fastai.metrics import accuracy
+from fastai.callback.tracker import ReduceLROnPlateau
+from tsai.data.all import *
+from tsai.learner import *
+
+
+
# Offline feature calculation
+dsid = 'ECGFiveDays'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+mrf = MiniRocketFeatures(c_in=X.shape[1], seq_len=X.shape[2]).to(default_device())
+X_train = X[splits[0]]  # X_train may either be a np.ndarray or a torch.Tensor
+mrf.fit(X_train)
+X_tfm = get_minirocket_features(X, mrf)
+tfms = [None, TSClassification()]
+batch_tfms = TSStandardize(by_var=True)
+dls = get_ts_dls(X_tfm, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=256)
+learn = ts_learner(dls, MiniRocketHead, metrics=accuracy)
+learn.fit(1, 1e-4, cbs=ReduceLROnPlateau(factor=0.5, min_lr=1e-8, patience=10))
+
epoch  train_loss  valid_loss  accuracy  time
0      0.693147    0.530879    0.752613  00:00
+
+
+
+
# Online feature calculation
+dsid = 'ECGFiveDays'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+tfms = [None, TSClassification()]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=256)
+learn = ts_learner(dls, MiniRocket, metrics=accuracy)
+learn.fit_one_cycle(1, 1e-2)
+
epoch  train_loss  valid_loss  accuracy  time
0      0.693147    0.713297    0.502904  00:06
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.minirocketplus_pytorch.html b/models.minirocketplus_pytorch.html new file mode 100644 index 000000000..88f1167f7 --- /dev/null +++ b/models.minirocketplus_pytorch.html @@ -0,0 +1,1567 @@ + + + + + + + + + +tsai - MINIROCKETPlus Pytorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

MINIROCKETPlus Pytorch

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is a modified Pytorch implementation of MiniRocket originally developed by Malcolm McLean and Ignacio Oguiza and based on:

+

Dempster, A., Schmidt, D. F., & Webb, G. I. (2020). MINIROCKET: A Very Fast (Almost) Deterministic Transform for Time Series Classification. arXiv preprint arXiv:2012.08791.

+

Original paper: https://arxiv.org/abs/2012.08791

+

Original code: https://github.com/angus924/minirocket

+
+

source

+
+

MiniRocketFeaturesPlus

+
+
 MiniRocketFeaturesPlus (c_in, seq_len, num_features=10000,
+                         max_dilations_per_kernel=32, kernel_size=9,
+                         max_num_channels=9, max_num_kernels=84,
+                         add_lsaz=False)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+

source

+
+
+

MiniRocketPlus

+
+
 MiniRocketPlus (c_in, c_out, seq_len, num_features=10000,
+                 max_dilations_per_kernel=32, kernel_size=9,
+                 max_num_channels=None, max_num_kernels=84, bn=True,
+                 fc_dropout=0, add_lsaz=False, custom_head=None,
+                 zero_init=True)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

Flatten

+
+
 Flatten (*args, **kwargs)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+
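Flatten is only documented here through the generic nn.Module docstring. A minimal sketch, assuming it collapses all non-batch dimensions (like nn.Flatten):

# Assumption: Flatten reshapes (bs, *dims) into (bs, prod(dims)).
t = torch.rand(8, 3, 10)
test_eq(Flatten()(t).shape, (8, 30))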

source

+
+
+

get_minirocket_features

+
+
 get_minirocket_features (o, model, chunksize=1024, use_cuda=None,
+                          to_np=False)
+
+

Function used to split a large dataset into chunks, avoiding OOM error.

+
+

source

+
+
+

MiniRocketHead

+
+
 MiniRocketHead (c_in, c_out, seq_len=1, bn=True, fc_dropout=0.0)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
from tsai.imports import default_device
+from fastai.metrics import accuracy
+from fastai.callback.tracker import ReduceLROnPlateau
+from tsai.data.all import *
+from tsai.learner import *
+
+
+
# Offline feature calculation
+dsid = 'ECGFiveDays'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+mrf = MiniRocketFeaturesPlus(c_in=X.shape[1], seq_len=X.shape[2]).to(default_device())
+X_train = X[splits[0]]  # X_train may either be a np.ndarray or a torch.Tensor
+mrf.fit(X_train)
+X_tfm = get_minirocket_features(X, mrf).cpu().numpy()
+tfms = [None, TSClassification()]
+batch_tfms = TSStandardize(by_var=True)
+dls = get_ts_dls(X_tfm, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=256)
+learn = ts_learner(dls, MiniRocketHead, metrics=accuracy)
+learn.fit(1, 1e-4, cbs=ReduceLROnPlateau(factor=0.5, min_lr=1e-8, patience=10))
+
+
+
# Online feature calculation
+dsid = 'ECGFiveDays'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+tfms = [None, TSClassification()]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=256)
+learn = ts_learner(dls, MiniRocketPlus, kernel_size=7, metrics=accuracy)
+learn.fit_one_cycle(1, 1e-2)
+
+
+
from functools import partial
+from fastcore.test import *
+from tsai.models.utils import build_ts_model
+from tsai.models.layers import mlp_head, rocket_nd_head
+
+
+
bs, c_in, seq_len = 8, 3, 50
+c_out = 2
+xb = torch.randn(bs, c_in, seq_len)
+model = build_ts_model(MiniRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model = build_ts_model(MiniRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len, add_lsaz=True)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model = build_ts_model(MiniRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len, custom_head=mlp_head)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+
+
+
X = np.random.rand(8, 10, 100)
+y = np.random.rand(8, 1, 100)
+splits = TimeSplitter(show_plot=False)(y)
+tfms = [None, TSRegression()]
+batch_tfms = TSStandardize(by_sample=True)
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+custom_head = partial(rocket_nd_head, d=dls.d)
+model = MiniRocketPlus(dls.vars, dls.c, dls.len, custom_head=custom_head)
+xb,yb = dls.one_batch()
+test_eq(model.to(xb.device)(xb).shape[1:], y.shape[1:])
+
+
+
X = np.random.rand(16, 10, 100)
+y = np.random.randint(0, 4, (16, 1, 100))
+splits = TimeSplitter(show_plot=False)(y)
+tfms = [None, TSClassification()]
+batch_tfms = TSStandardize(by_sample=True)
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+custom_head = partial(rocket_nd_head, d=dls.d)
+model = MiniRocketPlus(dls.vars, dls.c, dls.len, custom_head=custom_head)
+xb,yb = dls.one_batch()
+test_eq(model.to(xb.device)(xb).shape[1:], y.shape[1:]+(4,))
+
+
+

source

+
+
+

InceptionRocketFeaturesPlus

+
+
 InceptionRocketFeaturesPlus (c_in, seq_len, num_features=10000,
+                              max_dilations_per_kernel=32,
+                              kernel_sizes=array([3, 5, 7, 9]),
+                              max_num_channels=None, max_num_kernels=84,
+                              add_lsaz=True, same_n_feats_per_ks=False)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+

source

+
+
+

InceptionRocketPlus

+
+
 InceptionRocketPlus (c_in, c_out, seq_len, num_features=10000,
+                      max_dilations_per_kernel=32, kernel_sizes=[3, 5, 7,
+                      9], max_num_channels=None, max_num_kernels=84,
+                      same_n_feats_per_ks=False, add_lsaz=False, bn=True,
+                      fc_dropout=0, custom_head=None, zero_init=True)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
from fastcore.test import *
+from tsai.models.utils import build_ts_model
+
+
+
bs, c_in, seq_len = 8, 3, 50
+c_out = 2
+xb = torch.randn(bs, c_in, seq_len)
+model = build_ts_model(InceptionRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model = build_ts_model(InceptionRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len, add_lsaz=True)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+
+
+
X = np.random.rand(8, 10, 100)
+y = np.random.rand(8, 1, 100)
+splits = TimeSplitter(show_plot=False)(y)
+tfms = [None, TSRegression()]
+batch_tfms = TSStandardize(by_sample=True)
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+custom_head = partial(rocket_nd_head, d=dls.d)
+model = InceptionRocketPlus(dls.vars, dls.c, dls.len, custom_head=custom_head)
+xb,yb = dls.one_batch()
+test_eq(model.to(xb.device)(xb).shape[1:], y.shape[1:])
+
+
+
X = np.random.rand(16, 10, 100)
+y = np.random.randint(0, 4, (16, 1, 100))
+splits = TimeSplitter(show_plot=False)(y)
+tfms = [None, TSClassification()]
+batch_tfms = TSStandardize(by_sample=True)
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+custom_head = partial(rocket_nd_head, d=dls.d)
+model = MiniRocketPlus(dls.vars, dls.c, dls.len, custom_head=custom_head)
+xb,yb = dls.one_batch()
+test_eq(model.to(xb.device)(xb).shape[1:], y.shape[1:]+(4,))
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.misc.html b/models.misc.html new file mode 100644 index 000000000..12d1a475b --- /dev/null +++ b/models.misc.html @@ -0,0 +1,1289 @@ + + + + + + + + + +tsai - Miscellaneous + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Miscellaneous

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This contains a set of experiments.

+
+
+

source

+
+

InputWrapper

+
+
 InputWrapper (arch, c_in, c_out, seq_len, new_c_in=None,
+               new_seq_len=None, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
from tsai.models.TST import *
+
+
+
xb = torch.randn(16, 1, 1000)
+model = InputWrapper(TST, 1, 4, 1000, 10, 224)
+test_eq(model.to(xb.device)(xb).shape, (16,4))
+
+
+

source

+
+
+

ResidualWrapper

+
+
 ResidualWrapper (model)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

RecursiveWrapper

+
+
 RecursiveWrapper (model, n_steps, anchored=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
xb = torch.randn(16, 1, 20)
+model = RecursiveWrapper(TST(1, 1, 20), 5)
+test_eq(model.to(xb.device)(xb).shape, (16, 5))
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.mlp.html b/models.mlp.html new file mode 100644 index 000000000..e5da1c3b6 --- /dev/null +++ b/models.mlp.html @@ -0,0 +1,1294 @@ + + + + + + + + + +tsai - MLP + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

MLP

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:

+

Fawaz, H. I., Forestier, G., Weber, J., Idoumghar, L., & Muller, P. A. (2019). Deep learning for time series classification: a review. Data Mining and Knowledge Discovery, 33(4), 917-963.

+

Official MLP TensorFlow implementation: https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mlp.py

+
+

source

+
+

MLP

+
+
 MLP (c_in, c_out, seq_len, layers=[500, 500, 500], ps=[0.1, 0.2, 0.2],
+      act=ReLU(inplace=True), use_bn=False, bn_final=False,
+      lin_first=False, fc_dropout=0.0, y_range=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 16
+nvars = 3
+seq_len = 128
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+model = MLP(nvars, c_out, seq_len)
+test_eq(model(xb).shape, (bs, c_out))
+model
+
+
MLP(
+  (flatten): Reshape(bs)
+  (mlp): ModuleList(
+    (0): LinBnDrop(
+      (0): Dropout(p=0.1, inplace=False)
+      (1): Linear(in_features=384, out_features=500, bias=True)
+      (2): ReLU(inplace=True)
+    )
+    (1): LinBnDrop(
+      (0): Dropout(p=0.2, inplace=False)
+      (1): Linear(in_features=500, out_features=500, bias=True)
+      (2): ReLU(inplace=True)
+    )
+    (2): LinBnDrop(
+      (0): Dropout(p=0.2, inplace=False)
+      (1): Linear(in_features=500, out_features=500, bias=True)
+      (2): ReLU(inplace=True)
+    )
+  )
+  (head): Sequential(
+    (0): LinBnDrop(
+      (0): Linear(in_features=500, out_features=2, bias=True)
+    )
+  )
+)
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.multiinputnet.html b/models.multiinputnet.html new file mode 100644 index 000000000..9b6def6ea --- /dev/null +++ b/models.multiinputnet.html @@ -0,0 +1,1336 @@ + + + + + + + + + +tsai - MultiInputNet + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

MultiInputNet

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co).

+

It can be used to combine different types of deep learning models into a single one that will accept multiple inputs from a MixedDataLoaders object.

+
+

source

+
+

MultiInputNet

+
+
 MultiInputNet (*models, c_out=None, reshape_fn=None, multi_output=False,
+                custom_head=None, device=None, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
from tsai.basics import *
+from tsai.data.all import *
+from tsai.models.utils import *
+from tsai.models.InceptionTimePlus import *
+from tsai.models.TabModel import *
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+ts_features_df = get_ts_features(X, y)
+
+
Feature Extraction: 100%|███████████████████████████████████████████| 40/40 [00:07<00:00,  5.23it/s]
+
+
+
+
# raw ts
+tfms  = [None, [TSCategorize()]]
+batch_tfms = TSStandardize()
+ts_dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+ts_model = build_ts_model(InceptionTimePlus, dls=ts_dls)
+
+# ts features
+cat_names = None
+cont_names = ts_features_df.columns[:-2]
+y_names = 'target'
+tab_dls = get_tabular_dls(ts_features_df, cat_names=cat_names, cont_names=cont_names, y_names=y_names, splits=splits)
+tab_model = build_tabular_model(TabModel, dls=tab_dls)
+
+# mixed
+mixed_dls = get_mixed_dls(ts_dls, tab_dls)
+MultiModalNet = MultiInputNet(ts_model, tab_model)
+learn = Learner(mixed_dls, MultiModalNet, metrics=[accuracy, RocAuc()])
+learn.fit_one_cycle(1, 1e-3)
+
epoch  train_loss  valid_loss  accuracy  roc_auc_score  time
0      1.780674    1.571718    0.477778  0.857444       00:05
+
+
+
+
(ts, (cat, cont)),yb = mixed_dls.one_batch()
+learn.model((ts, (cat, cont))).shape
+
+
torch.Size([64, 6])
+
+
+
+
tab_dls.c, ts_dls.c, ts_dls.cat
+
+
(6, 6, True)
+
+
+
+
learn.loss_func
+
+
FlattenedLoss of CrossEntropyLoss()
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.multimodal.html b/models.multimodal.html new file mode 100644 index 000000000..0845b5dd7 --- /dev/null +++ b/models.multimodal.html @@ -0,0 +1,2407 @@ + + + + + + + + + +tsai - Multimodal + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Multimodal

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Functionality used for multiple data modalities.

+
+

A common scenario in time-series related tasks is the use of multiple types of inputs:

- static features: values that remain constant for a given sample across all time steps
- observed features: time-dependent values that are only available up to the current time step
- known features: time-dependent values that are known in advance (including over the prediction horizon)

At the same time, these different modalities may contain:

- categorical features
- continuous (numerical) features

Based on that, there are situations where we have up to 6 different types of input features:

- static categorical (s_cat) and static continuous (s_cont) features
- observed categorical (o_cat) and observed continuous (o_cont) features
- known categorical (k_cat) and known continuous (k_cont) features
+

source

+
+

get_feat_idxs

+
+
 get_feat_idxs (c_in, s_cat_idxs=None, s_cont_idxs=None, o_cat_idxs=None,
+                o_cont_idxs=None)
+
+

Calculate the indices of the features used for training.

+
+

source

+
+
+

get_o_cont_idxs

+
+
 get_o_cont_idxs (c_in, s_cat_idxs=None, s_cont_idxs=None,
+                  o_cat_idxs=None)
+
+

Calculate the indices of the observed continuous features.

+
+
c_in = 7
+s_cat_idxs = 3
+s_cont_idxs = [1, 4, 5]
+o_cat_idxs = None
+o_cont_idxs = None
+
+s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs = get_feat_idxs(c_in, s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs, o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs)
+
+test_eq(s_cat_idxs, [3])
+test_eq(s_cont_idxs, [1, 4, 5])
+test_eq(o_cat_idxs, [])
+test_eq(o_cont_idxs, [0, 2, 6])
+
+
+
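get_o_cont_idxs is only exercised indirectly above (through get_feat_idxs). A direct call sketch, assuming that every feature index not listed as static categorical, static continuous or observed categorical is treated as observed continuous:

# With 7 features, index 3 static categorical and 1, 4, 5 static continuous,
# the remaining indices 0, 2 and 6 should be returned as observed continuous.
o_cont = get_o_cont_idxs(7, s_cat_idxs=[3], s_cont_idxs=[1, 4, 5])
print(o_cont)  # expected: [0, 2, 6], matching o_cont_idxs above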

source

+
+
+

TensorSplitter

+
+
 TensorSplitter (s_cat_idxs:list=None, s_cont_idxs:list=None,
+                 o_cat_idxs:list=None, o_cont_idxs:list=None,
+                 k_cat_idxs:list=None, k_cont_idxs:list=None,
+                 horizon:int=None)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

             Type  Default  Details
s_cat_idxs   list  None     list of indices for static categorical variables
s_cont_idxs  list  None     list of indices for static continuous variables
o_cat_idxs   list  None     list of indices for observed categorical variables
o_cont_idxs  list  None     list of indices for observed continuous variables
k_cat_idxs   list  None     list of indices for known categorical variables
k_cont_idxs  list  None     list of indices for known continuous variables
horizon      int   None     number of time steps to predict ahead
+
+
# Example usage
+bs = 4
+s_cat_idxs = 1
+s_cont_idxs = [0, 2]
+o_cat_idxs =[ 3, 4, 5]
+o_cont_idxs = None
+k_cat_idxs = None
+k_cont_idxs = None
+horizon=None
+input_tensor = torch.randn(bs, 6, 10)  # 3D input tensor
+splitter = TensorSplitter(s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs,
+                          o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs)
+slices = splitter(input_tensor)
+for i, slice_tensor in enumerate(slices):
+    print(f"Slice {i+1}: {slice_tensor.shape} {slice_tensor.dtype}")
+
+
Slice 1: torch.Size([4, 1]) torch.int64
+Slice 2: torch.Size([4, 2]) torch.int64
+Slice 3: torch.Size([4, 3, 10]) torch.float32
+Slice 4: torch.Size([4, 0, 10]) torch.float32
+
+
+
+
# Example usage
+bs = 4
+s_cat_idxs = 1
+s_cont_idxs = [0, 2]
+o_cat_idxs =[ 3, 4, 5]
+o_cont_idxs = None
+k_cat_idxs = [6,7]
+k_cont_idxs = 8
+horizon=3
+input_tensor = torch.randn(4, 9, 10)  # 3D input tensor
+splitter = TensorSplitter(s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs,
+                          o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs,
+                          k_cat_idxs=k_cat_idxs, k_cont_idxs=k_cont_idxs, horizon=horizon)
+slices = splitter(input_tensor)
+for i, slice_tensor in enumerate(slices):
+    print(f"Slice {i+1}: {slice_tensor.shape} {slice_tensor.dtype}")
+
+
Slice 1: torch.Size([4, 1]) torch.int64
+Slice 2: torch.Size([4, 2]) torch.int64
+Slice 3: torch.Size([4, 3, 7]) torch.float32
+Slice 4: torch.Size([4, 0, 7]) torch.float32
+Slice 5: torch.Size([4, 2, 10]) torch.float32
+Slice 6: torch.Size([4, 1, 10]) torch.float32
+
+
+
+

source

+
+
+

Embeddings

+
+
 Embeddings (n_embeddings:list, embedding_dims:list=None,
+             padding_idx:int=0, embed_dropout:float=0.0, **kwargs)
+
+

Embedding layers for each categorical variable in a 2D or 3D tensor

                Type   Default  Details
n_embeddings    list            List of num_embeddings for each categorical variable
embedding_dims  list   None     List of embedding dimensions for each categorical variable
padding_idx     int    0        Embedding padding_idx
embed_dropout   float  0.0      Dropout probability for Embedding layer
kwargs
+
+
t1 = torch.randint(0, 7, (16, 1))
+t2 = torch.randint(0, 5, (16, 1))
+t = torch.cat([t1, t2], 1).float()
+emb = Embeddings([7, 5], None, embed_dropout=0.1)
+test_eq(emb(t).shape, (16, 12))
+
+
+
t1 = torch.randint(0, 7, (16, 1))
+t2 = torch.randint(0, 5, (16, 1))
+t = torch.cat([t1, t2], 1).float()
+emb = Embeddings([7, 5], [4, 3])
+test_eq(emb(t).shape, (16, 12))
+
+
+
t1 = torch.randint(0, 7, (16, 1, 10))
+t2 = torch.randint(0, 5, (16, 1, 10))
+t = torch.cat([t1, t2], 1).float()
+emb = Embeddings([7, 5], None)
+test_eq(emb(t).shape, (16, 12, 10))
+
+
+

source

+
+
+

StaticBackbone

+
+
 StaticBackbone (c_in, c_out, seq_len, d=None, layers=[200, 100],
+                 dropouts=[0.1, 0.2], act=ReLU(inplace=True),
+                 use_bn=False, lin_first=False)
+
+

Static backbone model to embed static features

+
+
# Example usage
+bs = 4
+c_in = 6
+c_out = 8
+seq_len = 10
+input_tensor = torch.randn(bs, c_in, seq_len)  # 3D input tensor
+backbone = StaticBackbone(c_in, c_out, seq_len)
+output_tensor = backbone(input_tensor)
+print(f"Input shape: {input_tensor.shape} Output shape: {output_tensor.shape}")
+backbone
+
+
Input shape: torch.Size([4, 6, 10]) Output shape: torch.Size([4, 100])
+
+
+
StaticBackbone(
+  (flatten): Reshape(bs)
+  (mlp): ModuleList(
+    (0): LinBnDrop(
+      (0): Dropout(p=0.1, inplace=False)
+      (1): Linear(in_features=60, out_features=200, bias=True)
+      (2): ReLU(inplace=True)
+    )
+    (1): LinBnDrop(
+      (0): Dropout(p=0.2, inplace=False)
+      (1): Linear(in_features=200, out_features=100, bias=True)
+      (2): ReLU(inplace=True)
+    )
+  )
+)
+
+
+
+
# class MultInputWrapper(nn.Module):
+#     "Model wrapper for input tensors with static and/ or observed, categorical and/ or numerical features."
+
+#     def __init__(self,
+#         arch,
+#         c_in:int=None, # number of input variables
+#         c_out:int=None, # number of output variables
+#         seq_len:int=None, # input sequence length
+#         d:tuple=None, # shape of the output tensor
+#         dls:TSDataLoaders=None, # TSDataLoaders object
+#         s_cat_idxs:list=None, # list of indices for static categorical variables
+#         s_cat_embeddings:list=None, # list of num_embeddings for each static categorical variable
+#         s_cat_embedding_dims:list=None, # list of embedding dimensions for each static categorical variable
+#         s_cont_idxs:list=None, # list of indices for static continuous variables
+#         o_cat_idxs:list=None, # list of indices for observed categorical variables
+#         o_cat_embeddings:list=None, # list of num_embeddings for each observed categorical variable
+#         o_cat_embedding_dims:list=None, # list of embedding dimensions for each observed categorical variable
+#         o_cont_idxs:list=None, # list of indices for observed continuous variables. All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables.
+#         patch_len:int=None, # Number of time steps in each patch.
+#         patch_stride:int=None, # Stride of the patch.
+#         flatten:bool=False, # boolean indicating whether to flatten backbone's output tensor
+#         use_bn:bool=False, # boolean indicating whether to use batch normalization in the head
+#         fc_dropout:float=0., # dropout probability for the fully connected layer in the head
+#         custom_head=None, # custom head to replace the default head
+#         **kwargs
+#     ):
+#         super().__init__()
+
+#         # attributes
+#         c_in = c_in or dls.vars
+#         c_out = c_out or dls.c
+#         seq_len = seq_len or dls.len
+#         d = d or (dls.d if dls is not None else None)
+#         self.c_in, self.c_out, self.seq_len, self.d = c_in, c_out, seq_len, d
+
+#         # tensor splitter
+#         if o_cont_idxs is None:
+#             o_cont_idxs = get_o_cont_idxs(c_in, s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs, o_cat_idxs=o_cat_idxs)
+#         self.splitter = TensorSplitter(s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs)
+#         s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs = self.splitter.s_cat_idxs, self.splitter.s_cont_idxs, self.splitter.o_cat_idxs, self.splitter.o_cont_idxs
+#         assert c_in == sum([len(s_cat_idxs), len(s_cont_idxs), len(o_cat_idxs), len(o_cont_idxs)])
+
+#         # embeddings
+#         self.s_embeddings = Embeddings(s_cat_embeddings, s_cat_embedding_dims)
+#         self.o_embeddings = Embeddings(o_cat_embeddings, o_cat_embedding_dims)
+
+#         # patch encoder
+#         if patch_len is not None:
+#             patch_stride = patch_stride or patch_len
+#             self.patch_encoder = PatchEncoder(patch_len, patch_stride, seq_len=seq_len)
+#             c_mult = patch_len
+#             seq_len = (seq_len + self.patch_encoder.pad_size - patch_len) // patch_stride + 1
+#         else:
+#             self.patch_encoder = nn.Identity()
+#             c_mult = 1
+
+#         # backbone
+#         n_s_features = len(s_cont_idxs) + self.s_embeddings.embedding_dims
+#         n_o_features = (len(o_cont_idxs) + self.o_embeddings.embedding_dims) * c_mult
+#         s_backbone = StaticBackbone(c_in=n_s_features, c_out=c_out, seq_len=1, **kwargs)
+#         if isinstance(arch, str):
+#             arch = get_arch(arch)
+#         if isinstance(arch, nn.Module):
+#             o_model = arch
+#         else:
+#             o_model = build_ts_model(arch, c_in=n_o_features, c_out=c_out, seq_len=seq_len, d=d, **kwargs)
+#         assert hasattr(o_model, "backbone"), "the selected arch must have a backbone"
+#         o_backbone = getattr(o_model, "backbone")
+
+#         # head
+#         o_head_nf = output_size_calculator(o_backbone, n_o_features, seq_len)[0]
+#         s_head_nf = s_backbone.head_nf
+#         self.backbone = nn.ModuleList([o_backbone, s_backbone])
+#         self.head_nf = o_head_nf + s_head_nf
+#         if custom_head is not None:
+#             if isinstance(custom_head, nn.Module): self.head = custom_head
+#             else: self.head = custom_head(self.head_nf, c_out, seq_len, d=d)
+#         else:
+#             if "rocket" in o_model.__name__.lower():
+#                 self.head = rocket_nd_head(self.head_nf, c_out, seq_len=seq_len, d=d, use_bn=use_bn, fc_dropout=fc_dropout)
+#             else:
+#                 self.head = lin_nd_head(self.head_nf, c_out, seq_len=seq_len, d=d, flatten=flatten, use_bn=use_bn, fc_dropout=fc_dropout)
+
+#     def forward(self, x):
+#         # split x into static cat, static cont, observed cat, and observed cont
+#         s_cat, s_cont, o_cat, o_cont = self.splitter(x)
+
+#         # create categorical embeddings
+#         s_cat = self.s_embeddings(s_cat)
+#         o_cat = self.o_embeddings(o_cat)
+
+#         # concatenate static and observed features
+#         s_x = torch.cat([s_cat, s_cont], 1)
+#         o_x = torch.cat([o_cat, o_cont], 1)
+
+#         # patch encoder
+#         o_x = self.patch_encoder(o_x)
+
+#         # pass static and observed features through their respective backbones
+#         for i,(b,xi) in enumerate(zip(self.backbone, [o_x, s_x])):
+#             if i == 0:
+#                 x = b(xi)
+#                 if x.ndim == 2:
+#                     x = x[..., None]
+#             else:
+#                 x = torch.cat([x,  b(xi)[..., None].repeat(1, 1, x.shape[-1])], 1)
+
+#         # head
+#         x = self.head(x)
+#         return x
+
+
+
# from tsai.models.InceptionTimePlus import InceptionTimePlus
+
+
+
# c_in = 6
+# c_out = 3
+# seq_len = 97
+# d = None
+
+# s_cat_idxs=2
+# s_cont_idxs=4
+# o_cat_idxs=[0, 3]
+# o_cont_idxs=None
+# s_cat_embeddings = 5
+# s_cat_embedding_dims = None
+# o_cat_embeddings = [7, 3]
+# o_cat_embedding_dims = [3, None]
+
+# t0 = torch.randint(0, 7, (16, 1, seq_len)) # cat
+# t1 = torch.randn(16, 1, seq_len)
+# t2 = torch.randint(0, 5, (16, 1, seq_len)) # cat
+# t3 = torch.randint(0, 3, (16, 1, seq_len)) # cat
+# t4 = torch.randn(16, 1, seq_len)
+# t5 = torch.randn(16, 1, seq_len)
+
+# t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float()
+
+# patch_lens = [None, 5, 5, 5, 5]
+# patch_strides = [None, None, 1, 3, 5]
+# for patch_len, patch_stride in zip(patch_lens, patch_strides):
+#     for arch in ["InceptionTimePlus", InceptionTimePlus, "MultiRocketPlus"]:
+#         print(f"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}")
+
+#         model = MultInputWrapper(
+#             arch=arch,
+#             c_in=c_in,
+#             c_out=c_out,
+#             seq_len=seq_len,
+#             d=d,
+#             s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,
+#             s_cont_idxs=s_cont_idxs,
+#             o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,
+#             o_cont_idxs=o_cont_idxs,
+#             patch_len=patch_len,
+#             patch_stride=patch_stride,
+#         )
+
+#         test_eq(model(t).shape, (16,3))
+
+
+

source

+
+
+

FusionMLP

+
+
 FusionMLP (comb_dim, layers, act='relu', dropout=0.0, use_bn=True)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:`to`, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+
bs = 16
+emb_dim = 128
+seq_len = 20
+cat_dim = 24
+cont_feat = 3
+
+comb_dim = emb_dim + cat_dim + cont_feat
+emb = torch.randn(bs, emb_dim, seq_len)
+cat = torch.randn(bs, cat_dim)
+cont = torch.randn(bs, cont_feat)
+fusion_mlp = FusionMLP(comb_dim, layers=comb_dim, act='relu', dropout=.1)
+output = fusion_mlp(cat, cont, emb)
+test_eq(output.shape, (bs, comb_dim))
+
+
+
bs = 16
+emb_dim = 50000
+cat_dim = 24
+cont_feat = 3
+
+comb_dim = emb_dim + cat_dim + cont_feat
+emb = torch.randn(bs, emb_dim)
+cat = torch.randn(bs, cat_dim)
+cont = torch.randn(bs, cont_feat)
+fusion_mlp = FusionMLP(comb_dim, layers=[128], act='relu', dropout=.1)
+output = fusion_mlp(cat, cont, emb)
+test_eq(output.shape, (bs, 128))
+
+
+

source

+
+
+

MultInputBackboneWrapper

+
+
 MultInputBackboneWrapper (arch, c_in:int=None, seq_len:int=None,
+                           d:tuple=None,
+                           dls:tsai.data.core.TSDataLoaders=None,
+                           s_cat_idxs:list=None,
+                           s_cat_embeddings:list=None,
+                           s_cat_embedding_dims:list=None,
+                           s_cont_idxs:list=None, o_cat_idxs:list=None,
+                           o_cat_embeddings:list=None,
+                           o_cat_embedding_dims:list=None,
+                           o_cont_idxs:list=None, patch_len:int=None,
+                           patch_stride:int=None,
+                           fusion_layers:list=[128],
+                           fusion_act:str='relu',
+                           fusion_dropout:float=0.0,
+                           fusion_use_bn:bool=True, **kwargs)
+
+

Model backbone wrapper for input tensors with static and/or observed, categorical and/or numerical features.

|                      | Type          | Default | Details |
|----------------------|---------------|---------|---------|
| arch                 |               |         |         |
| c_in                 | int           | None    | number of input variables |
| seq_len              | int           | None    | input sequence length |
| d                    | tuple         | None    | shape of the output tensor |
| dls                  | TSDataLoaders | None    | TSDataLoaders object |
| s_cat_idxs           | list          | None    | list of indices for static categorical variables |
| s_cat_embeddings     | list          | None    | list of num_embeddings for each static categorical variable |
| s_cat_embedding_dims | list          | None    | list of embedding dimensions for each static categorical variable |
| s_cont_idxs          | list          | None    | list of indices for static continuous variables |
| o_cat_idxs           | list          | None    | list of indices for observed categorical variables |
| o_cat_embeddings     | list          | None    | list of num_embeddings for each observed categorical variable |
| o_cat_embedding_dims | list          | None    | list of embedding dimensions for each observed categorical variable |
| o_cont_idxs          | list          | None    | list of indices for observed continuous variables. All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables. |
| patch_len            | int           | None    | Number of time steps in each patch. |
| patch_stride         | int           | None    | Stride of the patch. |
| fusion_layers        | list          | [128]   | list of layer dimensions for the fusion MLP |
| fusion_act           | str           | relu    | activation function for the fusion MLP |
| fusion_dropout       | float         | 0.0     | dropout probability for the fusion MLP |
| fusion_use_bn        | bool          | True    | boolean indicating whether to use batch normalization in the fusion MLP |
| kwargs               |               |         |         |
+
+
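The backbone wrapper can also be used on its own when only the fused features are needed (for example, to feed a custom head). The cell below is a hedged, illustrative sketch that is not part of the original notebook: it mirrors the MultInputWrapper examples further down and simply prints the shape of the fused output, which depends on the chosen arch and on fusion_layers.

# Illustrative sketch (not in the original notebook): run the backbone wrapper alone
# and inspect the fused features. Output shape depends on `arch` and `fusion_layers`.
from tsai.imports import default_device
from tsai.models.InceptionTimePlus import InceptionTimePlus

bs, c_in, seq_len = 8, 6, 97
t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat (observed)
t1 = torch.randn(bs, 1, seq_len)
t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat (static)
t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat (observed)
t4 = torch.randn(bs, 1, seq_len)
t5 = torch.randn(bs, 1, seq_len)
t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())

backbone = MultInputBackboneWrapper(
    InceptionTimePlus, c_in=c_in, seq_len=seq_len,
    s_cat_idxs=2, s_cat_embeddings=5,
    s_cont_idxs=4,
    o_cat_idxs=[0, 3], o_cat_embeddings=[7, 3], o_cat_embedding_dims=[3, None],
    fusion_layers=[128],
).to(default_device())

print(backbone(t).shape)  # printed rather than asserted: shape depends on the settings above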

source

+
+
+

MultInputWrapper

+
+
 MultInputWrapper (arch, c_in:int=None, c_out:int=1, seq_len:int=None,
+                   d:tuple=None, dls:tsai.data.core.TSDataLoaders=None,
+                   s_cat_idxs:list=None, s_cat_embeddings:list=None,
+                   s_cat_embedding_dims:list=None, s_cont_idxs:list=None,
+                   o_cat_idxs:list=None, o_cat_embeddings:list=None,
+                   o_cat_embedding_dims:list=None, o_cont_idxs:list=None,
+                   patch_len:int=None, patch_stride:int=None,
+                   fusion_layers:list=128, fusion_act:str='relu',
+                   fusion_dropout:float=0.0, fusion_use_bn:bool=True,
+                   custom_head=None, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
|                      | Type          | Default | Details |
|----------------------|---------------|---------|---------|
| arch                 |               |         |         |
| c_in                 | int           | None    | number of input variables |
| c_out                | int           | 1       | number of output variables |
| seq_len              | int           | None    | input sequence length |
| d                    | tuple         | None    | shape of the output tensor |
| dls                  | TSDataLoaders | None    | TSDataLoaders object |
| s_cat_idxs           | list          | None    | list of indices for static categorical variables |
| s_cat_embeddings     | list          | None    | list of num_embeddings for each static categorical variable |
| s_cat_embedding_dims | list          | None    | list of embedding dimensions for each static categorical variable |
| s_cont_idxs          | list          | None    | list of indices for static continuous variables |
| o_cat_idxs           | list          | None    | list of indices for observed categorical variables |
| o_cat_embeddings     | list          | None    | list of num_embeddings for each observed categorical variable |
| o_cat_embedding_dims | list          | None    | list of embedding dimensions for each observed categorical variable |
| o_cont_idxs          | list          | None    | list of indices for observed continuous variables. All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables. |
| patch_len            | int           | None    | Number of time steps in each patch. |
| patch_stride         | int           | None    | Stride of the patch. |
| fusion_layers        | list          | 128     | list of layer dimensions for the fusion MLP |
| fusion_act           | str           | relu    | activation function for the fusion MLP |
| fusion_dropout       | float         | 0.0     | dropout probability for the fusion MLP |
| fusion_use_bn        | bool          | True    | boolean indicating whether to use batch normalization in the fusion MLP |
| custom_head          | NoneType      | None    | custom head to replace the default head |
| kwargs               |               |         |         |
+
+
from tsai.models.InceptionTimePlus import InceptionTimePlus
+
+
+
bs = 8
+c_in = 6
+c_out = 3
+seq_len = 97
+d = None
+
+s_cat_idxs=2
+s_cont_idxs=4
+o_cat_idxs=[0, 3]
+o_cont_idxs=None
+s_cat_embeddings = 5
+s_cat_embedding_dims = None
+o_cat_embeddings = [7, 3]
+o_cat_embedding_dims = [3, None]
+
+fusion_layers = 128
+
+t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat
+t1 = torch.randn(bs, 1, seq_len)
+t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat
+t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat
+t4 = torch.randn(bs, 1, seq_len)
+t5 = torch.randn(bs, 1, seq_len)
+
+t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())
+
+patch_lens = [None, 5, 5, 5, 5]
+patch_strides = [None, None, 1, 3, 5]
+for patch_len, patch_stride in zip(patch_lens, patch_strides):
+    for arch in ["InceptionTimePlus", InceptionTimePlus, "TSiTPlus"]:
+        print(f"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}")
+
+        model = MultInputWrapper(
+            arch=arch,
+            c_in=c_in,
+            c_out=c_out,
+            seq_len=seq_len,
+            d=d,
+            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,
+            s_cont_idxs=s_cont_idxs,
+            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,
+            o_cont_idxs=o_cont_idxs,
+            patch_len=patch_len,
+            patch_stride=patch_stride,
+            fusion_layers=fusion_layers,
+        ).to(default_device())
+
+        test_eq(model(t).shape, (bs, c_out))
+
+
arch: InceptionTimePlus, patch_len: None, patch_stride: None
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None
+arch: TSiTPlus, patch_len: None, patch_stride: None
+arch: InceptionTimePlus, patch_len: 5, patch_stride: None
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None
+arch: TSiTPlus, patch_len: 5, patch_stride: None
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 1
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1
+arch: TSiTPlus, patch_len: 5, patch_stride: 1
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 3
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3
+arch: TSiTPlus, patch_len: 5, patch_stride: 3
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 5
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5
+arch: TSiTPlus, patch_len: 5, patch_stride: 5
+
+
+
+
bs = 8
+c_in = 6
+c_out = 3
+seq_len = 97
+d = None
+
+s_cat_idxs=None
+s_cont_idxs=4
+o_cat_idxs=[0, 3]
+o_cont_idxs=None
+s_cat_embeddings = None
+s_cat_embedding_dims = None
+o_cat_embeddings = [7, 3]
+o_cat_embedding_dims = [3, None]
+
+fusion_layers = 128
+
+t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat
+t1 = torch.randn(bs, 1, seq_len)
+t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat
+t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat
+t4 = torch.randn(bs, 1, seq_len)
+t5 = torch.randn(bs, 1, seq_len)
+
+t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())
+
+patch_lens = [None, 5, 5, 5, 5]
+patch_strides = [None, None, 1, 3, 5]
+for patch_len, patch_stride in zip(patch_lens, patch_strides):
+    for arch in ["InceptionTimePlus", InceptionTimePlus, "TSiTPlus"]:
+        print(f"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}")
+
+        model = MultInputWrapper(
+            arch=arch,
+            c_in=c_in,
+            c_out=c_out,
+            seq_len=seq_len,
+            d=d,
+            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,
+            s_cont_idxs=s_cont_idxs,
+            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,
+            o_cont_idxs=o_cont_idxs,
+            patch_len=patch_len,
+            patch_stride=patch_stride,
+            fusion_layers=fusion_layers,
+        ).to(default_device())
+
+        test_eq(model(t).shape, (bs, c_out))
+
+
arch: InceptionTimePlus, patch_len: None, patch_stride: None
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None
+arch: TSiTPlus, patch_len: None, patch_stride: None
+arch: InceptionTimePlus, patch_len: 5, patch_stride: None
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None
+arch: TSiTPlus, patch_len: 5, patch_stride: None
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 1
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1
+arch: TSiTPlus, patch_len: 5, patch_stride: 1
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 3
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3
+arch: TSiTPlus, patch_len: 5, patch_stride: 3
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 5
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5
+arch: TSiTPlus, patch_len: 5, patch_stride: 5
+
+
+
+
bs = 8
+c_in = 6
+c_out = 3
+seq_len = 97
+d = None
+
+s_cat_idxs=2
+s_cont_idxs=4
+o_cat_idxs=None
+o_cont_idxs=None
+s_cat_embeddings = 5
+s_cat_embedding_dims = None
+o_cat_embeddings = None
+o_cat_embedding_dims = None
+
+fusion_layers = 128
+
+t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat
+t1 = torch.randn(bs, 1, seq_len)
+t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat
+t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat
+t4 = torch.randn(bs, 1, seq_len)
+t5 = torch.randn(bs, 1, seq_len)
+
+t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())
+
+patch_lens = [None, 5, 5, 5, 5]
+patch_strides = [None, None, 1, 3, 5]
+for patch_len, patch_stride in zip(patch_lens, patch_strides):
+    for arch in ["InceptionTimePlus", InceptionTimePlus, "TSiTPlus"]:
+        print(f"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}")
+
+        model = MultInputWrapper(
+            arch=arch,
+            c_in=c_in,
+            c_out=c_out,
+            seq_len=seq_len,
+            d=d,
+            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,
+            s_cont_idxs=s_cont_idxs,
+            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,
+            o_cont_idxs=o_cont_idxs,
+            patch_len=patch_len,
+            patch_stride=patch_stride,
+            fusion_layers=fusion_layers,
+        ).to(default_device())
+
+        test_eq(model(t).shape, (bs, c_out))
+
+
arch: InceptionTimePlus, patch_len: None, patch_stride: None
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None
+arch: TSiTPlus, patch_len: None, patch_stride: None
+arch: InceptionTimePlus, patch_len: 5, patch_stride: None
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None
+arch: TSiTPlus, patch_len: 5, patch_stride: None
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 1
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1
+arch: TSiTPlus, patch_len: 5, patch_stride: 1
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 3
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3
+arch: TSiTPlus, patch_len: 5, patch_stride: 3
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 5
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5
+arch: TSiTPlus, patch_len: 5, patch_stride: 5
+
+
+
+
bs = 8
+c_in = 6
+c_out = 3
+seq_len = 97
+d = None
+
+s_cat_idxs=None
+s_cont_idxs=None
+o_cat_idxs=None
+o_cont_idxs=None
+s_cat_embeddings = None
+s_cat_embedding_dims = None
+o_cat_embeddings = None
+o_cat_embedding_dims = None
+
+fusion_layers = 128
+
+t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat
+t1 = torch.randn(bs, 1, seq_len)
+t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat
+t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat
+t4 = torch.randn(bs, 1, seq_len)
+t5 = torch.randn(bs, 1, seq_len)
+
+t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())
+
+patch_lens = [None, 5, 5, 5, 5]
+patch_strides = [None, None, 1, 3, 5]
+for patch_len, patch_stride in zip(patch_lens, patch_strides):
+    for arch in ["InceptionTimePlus", InceptionTimePlus, "TSiTPlus"]:
+        print(f"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}")
+
+        model = MultInputWrapper(
+            arch=arch,
+            c_in=c_in,
+            c_out=c_out,
+            seq_len=seq_len,
+            d=d,
+            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,
+            s_cont_idxs=s_cont_idxs,
+            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,
+            o_cont_idxs=o_cont_idxs,
+            patch_len=patch_len,
+            patch_stride=patch_stride,
+            fusion_layers=fusion_layers,
+        ).to(default_device())
+
+        test_eq(model(t).shape, (bs, c_out))
+
+
arch: InceptionTimePlus, patch_len: None, patch_stride: None
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None
+arch: TSiTPlus, patch_len: None, patch_stride: None
+arch: InceptionTimePlus, patch_len: 5, patch_stride: None
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None
+arch: TSiTPlus, patch_len: 5, patch_stride: None
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 1
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1
+arch: TSiTPlus, patch_len: 5, patch_stride: 1
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 3
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3
+arch: TSiTPlus, patch_len: 5, patch_stride: 3
+arch: InceptionTimePlus, patch_len: 5, patch_stride: 5
+arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5
+arch: TSiTPlus, patch_len: 5, patch_stride: 5
+
+
\ No newline at end of file
diff --git a/models.multirocketplus.html b/models.multirocketplus.html
new file mode 100644
index 000000000..8540e222e
--- /dev/null
+++ b/models.multirocketplus.html
@@ -0,0 +1,1508 @@
+
+
+

MultiRocketPlus

+
+

MultiRocket: Multiple pooling operators and transformations for fast and effective time series classification.

+
+

This is a PyTorch implementation of MultiRocket developed by Malcolm McLean and Ignacio Oguiza based on:

+

Tan, C. W., Dempster, A., Bergmeir, C., & Webb, G. I. (2022). MultiRocket: multiple pooling operators and transformations for fast and effective time series classification. Data Mining and Knowledge Discovery, 36(5), 1623-1646.

+

Original paper: https://link.springer.com/article/10.1007/s10618-022-00844-1

+

Original repository: https://github.com/ChangWeiTan/MultiRocket

+
+

source

+
+

Flatten

+
+
 Flatten (*args, **kwargs)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:`to`, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
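A quick, hedged check of this helper (assumed behavior, not stated in the original docs: it collapses every dimension after the batch dimension):

# Hedged sketch — assumed behavior: flatten everything after the batch dimension
x = torch.rand(2, 3, 4)
print(Flatten()(x).shape)  # expected under that assumption: torch.Size([2, 12])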
+
from tsai.imports import default_device
+
+
+
o = torch.rand(2, 3, 5, 4).to(default_device()) - .3
+print(o)
+
+output = _LPVV(o, dim=2)
+print(output)  # Should print: torch.Size([2, 3, 4])
+
+
tensor([[[[ 0.5644, -0.0509, -0.0390,  0.4091],
+          [ 0.0517, -0.1471,  0.6458,  0.5593],
+          [ 0.4516, -0.0821,  0.1271,  0.0592],
+          [ 0.4151,  0.4376,  0.0763,  0.3780],
+          [ 0.2653, -0.1817,  0.0156,  0.4993]],
+
+         [[-0.0779,  0.0858,  0.1982,  0.3224],
+          [ 0.1130,  0.0714, -0.1779,  0.5360],
+          [-0.1848, -0.2270, -0.0925, -0.1217],
+          [ 0.2820, -0.0205, -0.2777,  0.3755],
+          [-0.2490,  0.2613,  0.4237,  0.4534]],
+
+         [[-0.0162,  0.6368,  0.0016,  0.1467],
+          [ 0.6035, -0.1365,  0.6930,  0.6943],
+          [ 0.2790,  0.3818, -0.0731,  0.0167],
+          [ 0.6442,  0.3443,  0.4829, -0.0944],
+          [ 0.2932,  0.6952,  0.5541,  0.5946]]],
+
+
+        [[[ 0.6757,  0.5740,  0.3071,  0.4400],
+          [-0.2344, -0.1056,  0.4773,  0.2432],
+          [ 0.2595, -0.1528, -0.0866,  0.6201],
+          [ 0.0657,  0.1220,  0.4849,  0.4254],
+          [ 0.3399, -0.1609,  0.3465,  0.2389]],
+
+         [[-0.0765,  0.0516,  0.0028,  0.4381],
+          [ 0.5212, -0.2781, -0.0896, -0.0301],
+          [ 0.6857,  0.3583,  0.5869,  0.3418],
+          [ 0.3002,  0.5135,  0.6011,  0.6499],
+          [-0.2807, -0.2888,  0.3965,  0.6585]],
+
+         [[-0.1368,  0.6677,  0.1439,  0.1434],
+          [-0.1820,  0.1041, -0.1211,  0.6103],
+          [ 0.5808,  0.4588,  0.4572,  0.3713],
+          [ 0.2389, -0.1392,  0.1371, -0.1570],
+          [ 0.2840,  0.1214, -0.0059,  0.5064]]]], device='mps:0')
+tensor([[[ 1.0000, -0.6000,  0.6000,  1.0000],
+         [-0.6000, -0.2000, -0.6000, -0.2000],
+         [ 0.6000,  0.2000, -0.2000,  0.2000]],
+
+        [[ 0.2000, -0.6000, -0.2000,  1.0000],
+         [ 0.2000, -0.2000,  0.2000,  0.2000],
+         [ 0.2000,  0.2000, -0.2000,  0.2000]]], device='mps:0')
+
+
+
+
output = _MPV(o, dim=2)
+print(output)  # Should print: torch.Size([2, 3, 4])
+
+
tensor([[[0.3496, 0.4376, 0.2162, 0.3810],
+         [0.1975, 0.1395, 0.3109, 0.4218],
+         [0.4550, 0.5145, 0.4329, 0.3631]],
+
+        [[0.3352, 0.3480, 0.4040, 0.3935],
+         [0.5023, 0.3078, 0.3968, 0.5221],
+         [0.3679, 0.3380, 0.2460, 0.4079]]], device='mps:0')
+
+
+
+
output = _RSPV(o, dim=2)
+print(output)  # Should print: torch.Size([2, 3, 4])
+
+
tensor([[[ 1.0000, -0.0270,  0.9138,  1.0000],
+         [-0.1286,  0.2568,  0.0630,  0.8654],
+         [ 0.9823,  0.8756,  0.9190,  0.8779]],
+
+        [[ 0.7024,  0.2482,  0.8983,  1.0000],
+         [ 0.6168,  0.2392,  0.8931,  0.9715],
+         [ 0.5517,  0.8133,  0.7065,  0.8244]]], device='mps:0')
+
+
+
+
output = _PPV(o, dim=2)
+print(output)  # Should print: torch.Size([2, 3, 4])
+
+
tensor([[[-0.3007, -1.0097, -0.6697, -0.2381],
+         [-1.0466, -0.9316, -0.9705, -0.3738],
+         [-0.2786, -0.2314, -0.3366, -0.4569]],
+
+        [[-0.5574, -0.8893, -0.3883, -0.2130],
+         [-0.5401, -0.8574, -0.4009, -0.1767],
+         [-0.6861, -0.5149, -0.7555, -0.4102]]], device='mps:0')
+
+
+
+

source

+
+
+

MultiRocketFeaturesPlus

+
+
 MultiRocketFeaturesPlus (c_in, seq_len, num_features=10000,
+                          max_dilations_per_kernel=32, kernel_size=9,
+                          max_num_channels=9, max_num_kernels=84,
+                          diff=False)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:`to`, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
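There is no standalone example for this module in the notebook. The hedged sketch below simply runs it on a random batch and prints the resulting feature shape (the exact width depends on num_features and the kernel/dilation settings):

# Hedged sketch (not in the original notebook): extract MultiRocket features from a random batch
xb = torch.randn(16, 5, 20).to(default_device())
mrf = MultiRocketFeaturesPlus(5, 20, num_features=10000).to(default_device())
print(mrf(xb).shape)  # printed rather than asserted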
+

source

+
+
+

MultiRocketBackbonePlus

+
+
 MultiRocketBackbonePlus (c_in, seq_len, num_features=50000,
+                          max_dilations_per_kernel=32, kernel_size=9,
+                          max_num_channels=None, max_num_kernels=84,
+                          use_diff=True)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:`to`, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
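This backbone is what MultiRocketPlus wraps with a head (see the model below). A hedged sketch of calling it directly:

# Hedged sketch (not in the original notebook): the backbone used inside MultiRocketPlus
xb = torch.randn(16, 5, 20).to(default_device())
backbone = MultiRocketBackbonePlus(5, 20, num_features=50000).to(default_device())
print(backbone(xb).shape)  # flattened rocket features; MultiRocketPlus' head maps these to c_out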
+

source

+
+
+

MultiRocketPlus

+
+
 MultiRocketPlus (c_in, c_out, seq_len, d=None, num_features=50000,
+                  max_dilations_per_kernel=32, kernel_size=9,
+                  max_num_channels=None, max_num_kernels=84, use_bn=True,
+                  fc_dropout=0, custom_head=None, zero_init=True,
+                  use_diff=True)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
from tsai.imports import default_device
+
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 20)).to(default_device())
+
+model = MultiRocketPlus(5, 3, 20, d=None, use_diff=True).to(default_device())
+output = model(xb)
+assert output.shape == (16, 3)
+output.shape
+
+
torch.Size([16, 3])
+
+
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 20)).to(default_device())
+
+model = MultiRocketPlus(5, 3, 20, d=None, use_diff=False).to(default_device())
+output = model(xb)
+assert output.shape == (16, 3)
+output.shape
+
+
torch.Size([16, 3])
+
+
+
+
xb = torch.randn(16, 5, 20).to(default_device())
+yb = torch.randint(0, 3, (16, 5, 20)).to(default_device())
+
+model = MultiRocketPlus(5, 3, 20, d=20, use_diff=True).to(default_device())
+output = model(xb)
+assert output.shape == (16, 20, 3)
+output.shape
+
+
torch.Size([16, 20, 3])
+
+
\ No newline at end of file
diff --git a/models.mwdn.html b/models.mwdn.html
new file mode 100644
index 000000000..b937f2378
--- /dev/null
+++ b/models.mwdn.html
@@ -0,0 +1,1363 @@
+
+
+

mWDN

+
+

multilevel Wavelet Decomposition Network (mWDN)

+
+

This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+

source

+
+

WaveBlock

+
+
 WaveBlock (c_in, c_out, seq_len, wavelet=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

mWDNPlus

+
+
 mWDNPlus (c_in, c_out, seq_len, d=None, levels=3, wavelet=None,
+           base_model=None, base_arch=<class
+           'tsai.models.InceptionTimePlus.InceptionTimePlus'>, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

mWDNBlocks

+
+
 mWDNBlocks (c_in, c_out, seq_len, levels=3, wavelet=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

mWDN

+
+
 mWDN (c_in, c_out, seq_len, levels=3, wavelet=None, base_arch=<class
+       'tsai.models.InceptionTimePlus.InceptionTimePlus'>, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
from tsai.models.TSTPlus import TSTPlus
+
+
+
bs = 16
+c_in = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, c_in, seq_len).to(default_device())
+test_eq(mWDN(c_in, c_out, seq_len).to(xb.device)(xb).shape, [bs, c_out])
+model = mWDNPlus(c_in, c_out, seq_len, fc_dropout=.5)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+model = mWDNPlus(c_in, c_out, seq_len, base_arch=TSTPlus, fc_dropout=.5)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+
+
+
model.head, model.head_nf
+
+
(Sequential(
+   (0): GELU(approximate='none')
+   (1): fastai.layers.Flatten(full=False)
+   (2): LinBnDrop(
+     (0): Dropout(p=0.5, inplace=False)
+     (1): Linear(in_features=1536, out_features=2, bias=True)
+   )
+ ),
+ 128)
+
+
+
+
bs = 16
+c_in = 3
+seq_len = 12
+d = 10
+c_out = 2
+xb = torch.rand(bs, c_in, seq_len).to(default_device())
+model = mWDNPlus(c_in, c_out, seq_len, d=d)
+test_eq(model.to(xb.device)(xb).shape, [bs, d, c_out])
+
+
+
bs = 16
+c_in = 3
+seq_len = 12
+d = (5, 2)
+c_out = 2
+xb = torch.rand(bs, c_in, seq_len).to(default_device())
+model = mWDNPlus(c_in, c_out, seq_len, d=d)
+test_eq(model.to(xb.device)(xb).shape, [bs, *d, c_out])
+
\ No newline at end of file
diff --git a/models.omniscalecnn.html b/models.omniscalecnn.html
new file mode 100644
index 000000000..94ddb716e
--- /dev/null
+++ b/models.omniscalecnn.html
@@ -0,0 +1,1376 @@
+
+
+

OmniScaleCNN

+
+

This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+

source

+
+

generate_layer_parameter_list

+
+
 generate_layer_parameter_list (start, end, layers, in_channel=1)
+
+
+

source

+
+
+

get_out_channel_number

+
+
 get_out_channel_number (paramenter_layer, in_channel, prime_list)
+
+
+

source

+
+
+

get_Prime_number_in_a_range

+
+
 get_Prime_number_in_a_range (start, end)
+
+
+
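These helpers build the per-layer convolution configurations used by OmniScaleCNN: kernel sizes are taken from small primes, and each layer's parameter budget is turned into concrete (in_channels, out_channels, kernel_size) settings. A hedged sketch, where the budget values are simply the OmniScaleCNN defaults:

# Hedged sketch (not in the original notebook): inspect the helper outputs
print(get_Prime_number_in_a_range(1, 12))  # candidate prime kernel sizes

layer_parameter_list = generate_layer_parameter_list(1, 12, [1024, 229376], in_channel=3)
for layer_parameters in layer_parameter_list:
    print(layer_parameters)  # per-layer conv configurations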

source

+
+
+

OmniScaleCNN

+
+
 OmniScaleCNN (c_in, c_out, seq_len, layers=[1024, 229376],
+               few_shot=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

build_layer_with_layer_parameter

+
+
 build_layer_with_layer_parameter (layer_parameters)
+
+

formerly build_layer_with_layer_parameter

+
+

source

+
+
+

SampaddingConv1D_BN

+
+
 SampaddingConv1D_BN (in_channels, out_channels, kernel_size)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
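As the OmniScaleCNN printout further below shows, this block is a same-padded Conv1d followed by BatchNorm1d, so the sequence length is preserved. A minimal sketch:

# Minimal sketch: same-padded Conv1d + BatchNorm1d (sequence length is preserved)
m = SampaddingConv1D_BN(3, 8, 3)
x = torch.rand(16, 3, 12)
print(m(x).shape)  # expected: torch.Size([16, 8, 12])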
+
bs = 16
+c_in = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, c_in, seq_len)
+m = create_model(OmniScaleCNN, c_in, c_out, seq_len)
+test_eq(OmniScaleCNN(c_in, c_out, seq_len)(xb).shape, [bs, c_out])
+m
+
+
OmniScaleCNN(
+  (net): Sequential(
+    (0): build_layer_with_layer_parameter(
+      (conv_list): ModuleList(
+        (0): SampaddingConv1D_BN(
+          (padding): ConstantPad1d(padding=(0, 0), value=0)
+          (conv1d): Conv1d(3, 56, kernel_size=(1,), stride=(1,))
+          (bn): BatchNorm1d(56, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+        (1): SampaddingConv1D_BN(
+          (padding): ConstantPad1d(padding=(0, 1), value=0)
+          (conv1d): Conv1d(3, 56, kernel_size=(2,), stride=(1,))
+          (bn): BatchNorm1d(56, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+        (2): SampaddingConv1D_BN(
+          (padding): ConstantPad1d(padding=(1, 1), value=0)
+          (conv1d): Conv1d(3, 56, kernel_size=(3,), stride=(1,))
+          (bn): BatchNorm1d(56, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+      )
+    )
+    (1): build_layer_with_layer_parameter(
+      (conv_list): ModuleList(
+        (0): SampaddingConv1D_BN(
+          (padding): ConstantPad1d(padding=(0, 0), value=0)
+          (conv1d): Conv1d(168, 227, kernel_size=(1,), stride=(1,))
+          (bn): BatchNorm1d(227, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+        (1): SampaddingConv1D_BN(
+          (padding): ConstantPad1d(padding=(0, 1), value=0)
+          (conv1d): Conv1d(168, 227, kernel_size=(2,), stride=(1,))
+          (bn): BatchNorm1d(227, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+        (2): SampaddingConv1D_BN(
+          (padding): ConstantPad1d(padding=(1, 1), value=0)
+          (conv1d): Conv1d(168, 227, kernel_size=(3,), stride=(1,))
+          (bn): BatchNorm1d(227, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+      )
+    )
+    (2): build_layer_with_layer_parameter(
+      (conv_list): ModuleList(
+        (0): SampaddingConv1D_BN(
+          (padding): ConstantPad1d(padding=(0, 0), value=0)
+          (conv1d): Conv1d(681, 510, kernel_size=(1,), stride=(1,))
+          (bn): BatchNorm1d(510, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+        (1): SampaddingConv1D_BN(
+          (padding): ConstantPad1d(padding=(0, 1), value=0)
+          (conv1d): Conv1d(681, 510, kernel_size=(2,), stride=(1,))
+          (bn): BatchNorm1d(510, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        )
+      )
+    )
+  )
+  (gap): GAP1d(
+    (gap): AdaptiveAvgPool1d(output_size=1)
+    (flatten): Flatten(full=False)
+  )
+  (hidden): Linear(in_features=1020, out_features=2, bias=True)
+)
+
+
\ No newline at end of file
diff --git a/models.patchtst.html b/models.patchtst.html
new file mode 100644
index 000000000..b9ff2299c
--- /dev/null
+++ b/models.patchtst.html
@@ -0,0 +1,1684 @@
+
+
+

PatchTST

+

This is an unofficial PyTorch implementation of PatchTST created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:

+

In this notebook, we are going to use a new state-of-the-art model called PatchTST (Nie et al, 2022) to create a long-term time series forecast.

+

Here are some paper details:

+ +
@article{Yuqietal-2022-PatchTST,
+  title={A Time Series is Worth 64 Words: Long-term Forecasting with Transformers},
+  author={Yuqi Nie and 
+          Nam H. Nguyen and 
+          Phanwadee Sinthong and 
+          Jayant Kalagnanam},
+  journal={arXiv preprint arXiv:2211.14730},
+  year={2022}
+}
+

PatchTST has shown some impressive results across some of the most widely used long-term datasets for benchmarking:

+
+
+

+
[image.png: PatchTST long-term forecasting benchmark results from the paper]
+
+
+
+

source

+
+

SeriesDecomposition

+
+
 SeriesDecomposition (kernel_size:int)
+
+

Series decomposition block

|             | Type | Details |
|-------------|------|---------|
| kernel_size | int  | the size of the window |
+
+

source

+
+
+

MovingAverage

+
+
 MovingAverage (kernel_size:int)
+
+

Moving average block to highlight the trend of time series

|             | Type | Details |
|-------------|------|---------|
| kernel_size | int  | the size of the window |
+
+
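Neither SeriesDecomposition nor MovingAverage has a standalone example in the notebook. The hedged sketch below assumes the [bs x seq_len x n_vars] layout these blocks use inside PatchTST and shows the trend/residual split:

# Hedged sketch — assumes the [bs x seq_len x n_vars] layout used inside PatchTST
x = torch.randn(32, 60, 9)

ma = MovingAverage(kernel_size=25)
print(ma(x).shape)              # smoothed trend, same shape as x

decomp = SeriesDecomposition(kernel_size=25)
res, trend = decomp(x)          # residual (seasonal) part and moving-average trend
print(res.shape, trend.shape)   # both match x's shape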

source

+
+
+

Flatten_Head

+
+
 Flatten_Head (individual, n_vars, nf, pred_dim)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:`to`, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

+
+
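Flatten_Head is the forecasting head used by PatchTST: it flattens each variable's patch features and projects them to the prediction length. A hedged sketch, assuming nf equals d_model times the number of patches and the backbone output layout is [bs x n_vars x d_model x n_patches]:

# Hedged sketch: map backbone features [bs x n_vars x d_model x n_patches] to [bs x n_vars x pred_dim]
bs, n_vars, d_model, n_patches, pred_dim = 32, 9, 128, 6, 20
z = torch.randn(bs, n_vars, d_model, n_patches)
head = Flatten_Head(individual=False, n_vars=n_vars, nf=d_model * n_patches, pred_dim=pred_dim)
print(head(z).shape)  # expected: torch.Size([32, 9, 20])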

source

+
+
+

PatchTST

+
+
 PatchTST (c_in, c_out, seq_len, pred_dim=None, n_layers=2, n_heads=8,
+           d_model=512, d_ff=2048, dropout=0.05, attn_dropout=0.0,
+           patch_len=16, stride=8, padding_patch=True, revin=True,
+           affine=False, individual=False, subtract_last=False,
+           decomposition=False, kernel_size=25, activation='gelu',
+           norm='BatchNorm', pre_norm=False, res_attention=True,
+           store_attn=False)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:`to`, etc.

+

.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.

:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool

|               | Type     | Default   | Details |
|---------------|----------|-----------|---------|
| c_in          |          |           | number of input channels |
| c_out         |          |           | used for compatibility |
| seq_len       |          |           | input sequence length |
| pred_dim      | NoneType | None      | prediction sequence length |
| n_layers      | int      | 2         | number of encoder layers |
| n_heads       | int      | 8         | number of heads |
| d_model       | int      | 512       | dimension of model |
| d_ff          | int      | 2048      | dimension of fully connected network (fcn) |
| dropout       | float    | 0.05      | dropout applied to all linear layers in the encoder |
| attn_dropout  | float    | 0.0       | dropout applied to the attention scores |
| patch_len     | int      | 16        | patch_len |
| stride        | int      | 8         | stride |
| padding_patch | bool     | True      | flag to indicate if padding is added if necessary |
| revin         | bool     | True      | RevIN |
| affine        | bool     | False     | RevIN affine |
| individual    | bool     | False     | individual head |
| subtract_last | bool     | False     | subtract_last |
| decomposition | bool     | False     | apply decomposition |
| kernel_size   | int      | 25        | decomposition kernel size |
| activation    | str      | gelu      | activation function of intermediate layer, relu or gelu. |
| norm          | str      | BatchNorm | type of normalization layer used in the encoder |
| pre_norm      | bool     | False     | flag to indicate if normalization is applied as the first step in the sublayers |
| res_attention | bool     | True      | flag to indicate if Residual MultiheadAttention should be used |
| store_attn    | bool     | False     | can be used to visualize attention weights |
+
+
from fastcore.test import test_eq
+from tsai.models.utils import count_parameters
+
+bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 1
+seq_len = 60
+pred_dim = 20
+
+xb = torch.randn(bs, c_in, seq_len)
+
+arch_config=dict(
+        n_layers=3,  # number of encoder layers
+        n_heads=16,  # number of heads
+        d_model=128,  # dimension of model
+        d_ff=256,  # dimension of fully connected network (fcn)
+        attn_dropout=0.,
+        dropout=0.2,  # dropout applied to all linear layers in the encoder
+        patch_len=16,  # patch_len
+        stride=8,  # stride
+    )
+
+model = PatchTST(c_in, c_out, seq_len, pred_dim, **arch_config)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_in, pred_dim])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 418470
+
+
+
+
+

Test conversion to Torchscript

+
+
import gc
+import os
+import torch
+import torch.nn as nn
+from fastcore.test import test_eq, test_close
+
+
+bs = 1
+new_bs = 8
+c_in = 3
+c_out = 1
+seq_len = 96
+pred_dim = 20
+
+# module
+model = PatchTST(c_in, c_out, seq_len, pred_dim)
+model = model.eval()
+
+# input data
+inp = torch.rand(bs, c_in, seq_len)
+new_inp = torch.rand(new_bs, c_in, seq_len)
+
+# original
+try:
+    output = model(inp)
+    new_output = model(new_inp)
+    print(f'{"original":10}: ok')
+except:
+    print(f'{"original":10}: failed')
+
+# tracing
+try:
+    traced_model = torch.jit.trace(model, inp)
+    file_path = f"_test_traced_model.pt"
+    torch.jit.save(traced_model, file_path)
+    traced_model = torch.jit.load(file_path)
+    test_eq(output, traced_model(inp))
+    test_eq(new_output, traced_model(new_inp))
+    os.remove(file_path)
+    del traced_model
+    gc.collect()
+    print(f'{"tracing":10}: ok')
+except:
+    print(f'{"tracing":10}: failed')
+
+# scripting
+try:
+    scripted_model = torch.jit.script(model)
+    file_path = f"_test_scripted_model.pt"
+    torch.jit.save(scripted_model, file_path)
+    scripted_model = torch.jit.load(file_path)
+    test_eq(output, scripted_model(inp))
+    test_eq(new_output, scripted_model(new_inp))
+    os.remove(file_path)
+    del scripted_model
+    gc.collect()
+    print(f'{"scripting":10}: ok')
+except:
+    print(f'{"scripting":10}: failed')
+
+
original  : ok
+tracing   : ok
+scripting : failed
+
+
+
+
+

Test conversion to onnx

+
+
try:
+    import onnx
+    import onnxruntime as ort
+    
+    try:
+        file_path = "_model_cpu.onnx"
+        torch.onnx.export(model.cpu(),               # model being run
+                        inp,                       # model input (or a tuple for multiple inputs)
+                        file_path,                 # where to save the model (can be a file or file-like object)
+                        input_names = ['input'],   # the model's input names
+                        output_names = ['output'], # the model's output names
+                        dynamic_axes={
+                            'input'  : {0 : 'batch_size'}, 
+                            'output' : {0 : 'batch_size'}} # variable length axes
+                        )
+
+
+        # Load the model and check it's ok
+        onnx_model = onnx.load(file_path)
+        onnx.checker.check_model(onnx_model)
+        del onnx_model
+        gc.collect()
+
+        # New session
+        ort_sess = ort.InferenceSession(file_path)
+        output_onnx = ort_sess.run(None, {'input': inp.numpy()})[0]
+        test_close(output.detach().numpy(), output_onnx)
+        new_output_onnx = ort_sess.run(None, {'input': new_inp.numpy()})[0]
+        test_close(new_output.detach().numpy(), new_output_onnx)
+        os.remove(file_path)
+        print(f'{"onnx":10}: ok')
+    except:
+        print(f'{"onnx":10}: failed')
+
+except ImportError:
+    print('onnx and onnxruntime are not installed. Please install them to run this test')
+
+
onnx and onnxruntime are not installed. Please install them to run this test
+
+
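Note: onnx and onnxruntime are optional dependencies. If they are missing, they can usually be installed from PyPI (standard package names assumed):

pip install onnx onnxruntime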
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/models.positional_encoders.html b/models.positional_encoders.html
new file mode 100644
index 000000000..8d6ec48ed
--- /dev/null
+++ b/models.positional_encoders.html
@@ -0,0 +1,1379 @@
+tsai - Positional encoders
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Positional encoders

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This includes some variations of positional encoders used with Transformers.

+
+
+

Imports

+
+
+

Positional encoders

+
+

source

+
+

PositionalEncoding

+
+
 PositionalEncoding (q_len, d_model, normalize=True)
+
+
+
pe = PositionalEncoding(1000, 512).detach().cpu().numpy()
+plt.pcolormesh(pe, cmap='viridis')
+plt.title('PositionalEncoding')
+plt.colorbar()
+plt.show()
+pe.mean(), pe.std(), pe.min(), pe.max(), pe.shape
+
+
+
+

+
+
+
+
+
+
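As a usage sketch (not part of the original notebook): the encoding is a (q_len, d_model) tensor that is simply added to a batch of token/patch embeddings before they enter the Transformer encoder, broadcasting over the batch dimension.

bs, q_len, d_model = 8, 100, 128
emb = torch.randn(bs, q_len, d_model)    # e.g. output of a linear patch/token projection
pe = PositionalEncoding(q_len, d_model)  # (q_len, d_model)
out = emb + pe                           # broadcasts over the batch dimension
print(out.shape)                         # torch.Size([8, 100, 128])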

source

+
+
+

Coord2dPosEncoding

+
+
 Coord2dPosEncoding (q_len, d_model, exponential=False, normalize=True,
+                     eps=0.001, verbose=False)
+
+
+
cpe = Coord2dPosEncoding(1000, 512, exponential=True, normalize=True).cpu().numpy()
+plt.pcolormesh(cpe, cmap='viridis')
+plt.title('Coord2dPosEncoding')
+plt.colorbar()
+plt.show()
+plt.plot(cpe.mean(0))
+plt.show()
+plt.plot(cpe.mean(1))
+plt.show()
+cpe.mean(), cpe.std(), cpe.min(), cpe.max()
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+

source

+
+
+

Coord1dPosEncoding

+
+
 Coord1dPosEncoding (q_len, exponential=False, normalize=True)
+
+
+
cpe = Coord1dPosEncoding(1000, exponential=True, normalize=True).detach().cpu().numpy()
+plt.pcolormesh(cpe, cmap='viridis')
+plt.title('Coord1dPosEncoding')
+plt.colorbar()
+plt.show()
+plt.plot(cpe.mean(1))
+plt.show()
+cpe.mean(), cpe.std(), cpe.min(), cpe.max(), cpe.shape
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
cpe = Coord1dPosEncoding(1000, exponential=True, normalize=True).detach().cpu().numpy()
+plt.pcolormesh(cpe, cmap='viridis')
+plt.title('Coord1dPosEncoding')
+plt.colorbar()
+plt.show()
+plt.plot(cpe.mean(1))
+plt.show()
+cpe.mean(), cpe.std(), cpe.min(), cpe.max()
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+ + +
+
+ +
+ +
\ No newline at end of file
diff --git a/models.rescnn.html b/models.rescnn.html
new file mode 100644
index 000000000..2c00f141f
--- /dev/null
+++ b/models.rescnn.html
@@ -0,0 +1,1342 @@
+tsai - ResCNN
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

ResCNN

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This is an unofficial PyTorch implementation by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+
from tsai.models.utils import *
+
+
+

source

+
+

ResCNN

+
+
 ResCNN (c_in, c_out, coord=False, separable=False, zero_norm=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
xb = torch.rand(16, 3, 10)
+test_eq(ResCNN(3,2,coord=True, separable=True)(xb).shape, [xb.shape[0], 2])
+test_eq(count_parameters(ResCNN(3,2)), 257283)
+
+
+
ResCNN(3,2,coord=True, separable=True)
+
+
ResCNN(
+  (block1): _ResCNNBlock(
+    (convblock1): ConvBlock(
+      (0): AddCoords1d()
+      (1): SeparableConv1d(
+        (depthwise_conv): Conv1d(4, 4, kernel_size=(7,), stride=(1,), padding=(3,), groups=4, bias=False)
+        (pointwise_conv): Conv1d(4, 64, kernel_size=(1,), stride=(1,), bias=False)
+      )
+      (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (3): ReLU()
+    )
+    (convblock2): ConvBlock(
+      (0): AddCoords1d()
+      (1): SeparableConv1d(
+        (depthwise_conv): Conv1d(65, 65, kernel_size=(5,), stride=(1,), padding=(2,), groups=65, bias=False)
+        (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)
+      )
+      (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (3): ReLU()
+    )
+    (convblock3): ConvBlock(
+      (0): AddCoords1d()
+      (1): SeparableConv1d(
+        (depthwise_conv): Conv1d(65, 65, kernel_size=(3,), stride=(1,), padding=(1,), groups=65, bias=False)
+        (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)
+      )
+      (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (shortcut): ConvBlock(
+      (0): AddCoords1d()
+      (1): Conv1d(4, 64, kernel_size=(1,), stride=(1,), bias=False)
+      (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (add): Add
+    (act): ReLU()
+  )
+  (block2): ConvBlock(
+    (0): AddCoords1d()
+    (1): SeparableConv1d(
+      (depthwise_conv): Conv1d(65, 65, kernel_size=(3,), stride=(1,), padding=(1,), groups=65, bias=False)
+      (pointwise_conv): Conv1d(65, 128, kernel_size=(1,), stride=(1,), bias=False)
+    )
+    (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (3): LeakyReLU(negative_slope=0.2)
+  )
+  (block3): ConvBlock(
+    (0): AddCoords1d()
+    (1): SeparableConv1d(
+      (depthwise_conv): Conv1d(129, 129, kernel_size=(3,), stride=(1,), padding=(1,), groups=129, bias=False)
+      (pointwise_conv): Conv1d(129, 256, kernel_size=(1,), stride=(1,), bias=False)
+    )
+    (2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (3): PReLU(num_parameters=1)
+  )
+  (block4): ConvBlock(
+    (0): AddCoords1d()
+    (1): SeparableConv1d(
+      (depthwise_conv): Conv1d(257, 257, kernel_size=(3,), stride=(1,), padding=(1,), groups=257, bias=False)
+      (pointwise_conv): Conv1d(257, 128, kernel_size=(1,), stride=(1,), bias=False)
+    )
+    (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (3): ELU(alpha=0.3)
+  )
+  (gap): AdaptiveAvgPool1d(output_size=1)
+  (squeeze): Squeeze(dim=-1)
+  (lin): Linear(in_features=128, out_features=2, bias=True)
+)
+
+
+
+
check_weight(ResCNN(3,2, zero_norm=True), is_bn)
+
+
(array([1., 1., 0., 1., 1., 1., 1.], dtype=float32),
+ array([0., 0., 0., 0., 0., 0., 0.], dtype=float32))
+
+
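As a minimal end-to-end sketch (not part of the original notebook), ResCNN can be trained like any other tsai architecture; the dataset and hyperparameters below are only examples and follow the same pattern used in the RNNs page of these docs.

from tsai.basics import *
from tsai.models.ResCNN import ResCNN

# illustrative dataset choice; any UCR/UEA dataset returned by get_UCR_data would do
X, y, splits = get_UCR_data('NATOPS', return_split=False)
tfms = [None, [TSCategorize()]]
dsets = TSDatasets(X, y, tfms=tfms, splits=splits)
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=16, num_workers=0)
model = ResCNN(dls.vars, dls.c)          # (c_in, c_out)
learn = Learner(dls, model, metrics=accuracy)
learn.fit_one_cycle(1, 3e-3)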
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/models.resnet.html b/models.resnet.html
new file mode 100644
index 000000000..eb691d209
--- /dev/null
+++ b/models.resnet.html
@@ -0,0 +1,1343 @@
+tsai - ResNet
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

ResNet

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+

source

+
+

ResNet

+
+
 ResNet (c_in, c_out)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

ResBlock

+
+
 ResBlock (ni, nf, kss=[7, 5, 3])
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
xb = torch.rand(2, 3, 4)
+test_eq(ResNet(3,2)(xb).shape, [xb.shape[0], 2])
+test_eq(count_parameters(ResNet(3, 2)), 479490) # for (3,2)
+
+
+
ResNet(3,2)
+
+
ResNet(
+  (resblock1): ResBlock(
+    (convblock1): ConvBlock(
+      (0): Conv1d(3, 64, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
+      (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (convblock2): ConvBlock(
+      (0): Conv1d(64, 64, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
+      (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (convblock3): ConvBlock(
+      (0): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
+      (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (shortcut): ConvBlock(
+      (0): Conv1d(3, 64, kernel_size=(1,), stride=(1,), bias=False)
+      (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (add): Add
+    (act): ReLU()
+  )
+  (resblock2): ResBlock(
+    (convblock1): ConvBlock(
+      (0): Conv1d(64, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (convblock2): ConvBlock(
+      (0): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (convblock3): ConvBlock(
+      (0): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (shortcut): ConvBlock(
+      (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (add): Add
+    (act): ReLU()
+  )
+  (resblock3): ResBlock(
+    (convblock1): ConvBlock(
+      (0): Conv1d(128, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (convblock2): ConvBlock(
+      (0): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (convblock3): ConvBlock(
+      (0): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    )
+    (shortcut): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (add): Add
+    (act): ReLU()
+  )
+  (gap): AdaptiveAvgPool1d(output_size=1)
+  (squeeze): Squeeze(dim=-1)
+  (fc): Linear(in_features=128, out_features=2, bias=True)
+)
+
+
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/models.resnetplus.html b/models.resnetplus.html
new file mode 100644
index 000000000..967547ec8
--- /dev/null
+++ b/models.resnetplus.html
@@ -0,0 +1,1433 @@
+tsai - ResNetPlus
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

ResNetPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+

source

+
+

ResNetPlus

+
+
 ResNetPlus (c_in, c_out, seq_len=None, nf=64, sa=False, se=None,
+             fc_dropout=0.0, concat_pool=False, flatten=False,
+             custom_head=None, y_range=None, ks=[7, 5, 3], coord=False,
+             separable=False, bn_1st=True, zero_norm=False, act=<class
+             'torch.nn.modules.activation.ReLU'>, act_kwargs={})
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

ResBlockPlus

+
+
 ResBlockPlus (ni, nf, ks=[7, 5, 3], coord=False, separable=False,
+               bn_1st=True, zero_norm=False, sa=False, se=None, act=<class
+               'torch.nn.modules.activation.ReLU'>, act_kwargs={})
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
from tsai.models.layers import Swish
+
+
+
xb = torch.rand(2, 3, 4)
+test_eq(ResNetPlus(3,2)(xb).shape, [xb.shape[0], 2])
+test_eq(ResNetPlus(3,2,coord=True, separable=True, bn_1st=False, zero_norm=True, act=Swish, act_kwargs={}, fc_dropout=0.5)(xb).shape, [xb.shape[0], 2])
+test_eq(count_parameters(ResNetPlus(3, 2)), 479490) # for (3,2)
+
+
+
from tsai.models.ResNet import *
+
+
+
test_eq(count_parameters(ResNet(3, 2)), count_parameters(ResNetPlus(3, 2))) # for (3,2)
+
+
+
m = ResNetPlus(3, 2, zero_norm=True, coord=True, separable=True)
+print('n_params:', count_parameters(m))
+print(m)
+print(check_weight(m, is_bn)[0])
+
+
n_params: 114820
+ResNetPlus(
+  (backbone): Sequential(
+    (0): ResBlockPlus(
+      (convblock1): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(4, 4, kernel_size=(7,), stride=(1,), padding=(3,), groups=4, bias=False)
+          (pointwise_conv): Conv1d(4, 64, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (3): ReLU()
+      )
+      (convblock2): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(65, 65, kernel_size=(5,), stride=(1,), padding=(2,), groups=65, bias=False)
+          (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (3): ReLU()
+      )
+      (convblock3): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(65, 65, kernel_size=(3,), stride=(1,), padding=(1,), groups=65, bias=False)
+          (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (shortcut): ConvBlock(
+        (0): AddCoords1d()
+        (1): Conv1d(4, 64, kernel_size=(1,), stride=(1,), bias=False)
+        (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (add): Add
+      (act): ReLU()
+    )
+    (1): ResBlockPlus(
+      (convblock1): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(65, 65, kernel_size=(7,), stride=(1,), padding=(3,), groups=65, bias=False)
+          (pointwise_conv): Conv1d(65, 128, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (3): ReLU()
+      )
+      (convblock2): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(129, 129, kernel_size=(5,), stride=(1,), padding=(2,), groups=129, bias=False)
+          (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (3): ReLU()
+      )
+      (convblock3): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(129, 129, kernel_size=(3,), stride=(1,), padding=(1,), groups=129, bias=False)
+          (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (shortcut): ConvBlock(
+        (0): AddCoords1d()
+        (1): Conv1d(65, 128, kernel_size=(1,), stride=(1,), bias=False)
+        (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (add): Add
+      (act): ReLU()
+    )
+    (2): ResBlockPlus(
+      (convblock1): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(129, 129, kernel_size=(7,), stride=(1,), padding=(3,), groups=129, bias=False)
+          (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (3): ReLU()
+      )
+      (convblock2): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(129, 129, kernel_size=(5,), stride=(1,), padding=(2,), groups=129, bias=False)
+          (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+        (3): ReLU()
+      )
+      (convblock3): ConvBlock(
+        (0): AddCoords1d()
+        (1): SeparableConv1d(
+          (depthwise_conv): Conv1d(129, 129, kernel_size=(3,), stride=(1,), padding=(1,), groups=129, bias=False)
+          (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (shortcut): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (add): Add
+      (act): ReLU()
+    )
+  )
+  (head): Sequential(
+    (0): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Reshape(bs)
+    )
+    (1): Linear(in_features=128, out_features=2, bias=True)
+  )
+)
+[1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 0. 1.]
+
+
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/models.rnn.html b/models.rnn.html
new file mode 100644
index 000000000..6dd7a14f0
--- /dev/null
+++ b/models.rnn.html
@@ -0,0 +1,1503 @@
+tsai - RNNs
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

RNNs

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

These are RNN, LSTM and GRU PyTorch implementations created by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+

source

+
+

GRU

+
+
 GRU (c_in, c_out, hidden_size=100, n_layers=1, bias=True, rnn_dropout=0,
+      bidirectional=False, fc_dropout=0.0, init_weights=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

LSTM

+
+
 LSTM (c_in, c_out, hidden_size=100, n_layers=1, bias=True, rnn_dropout=0,
+       bidirectional=False, fc_dropout=0.0, init_weights=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

RNN

+
+
 RNN (c_in, c_out, hidden_size=100, n_layers=1, bias=True, rnn_dropout=0,
+      bidirectional=False, fc_dropout=0.0, init_weights=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 16
+c_in = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, c_in, seq_len)
+test_eq(RNN(c_in, c_out, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, [bs, c_out])
+test_eq(RNN(c_in, c_out)(xb).shape, [bs, c_out])
+test_eq(RNN(c_in, c_out, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, [bs, c_out])
+test_eq(LSTM(c_in, c_out)(xb).shape, [bs, c_out])
+test_eq(GRU(c_in, c_out)(xb).shape, [bs, c_out])
+
+
+
from tsai.basics import *
+
+
+
dsid = 'NATOPS' 
+bs = 16
+X, y, splits = get_UCR_data(dsid, return_split=False)
+tfms  = [None, [TSCategorize()]]
+dsets = TSDatasets(X, y, tfms=tfms, splits=splits)
+dls   = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=bs, num_workers=0, shuffle=False)
+model = LSTM(dls.vars, dls.c)
+learn = Learner(dls, model,  metrics=accuracy)
+learn.fit_one_cycle(1, 3e-3)
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + +
epoch | train_loss | valid_loss | accuracy | time
0 | 1.743440 | 1.633068 | 0.361111 | 00:01
+
+
+
+
m = RNN(c_in, c_out, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
+print(m)
+print(count_parameters(m))
+m(xb).shape
+
+
RNN(
+  (rnn): RNN(3, 100, num_layers=2, batch_first=True, dropout=0.5, bidirectional=True)
+  (dropout): Dropout(p=0.5, inplace=False)
+  (fc): Linear(in_features=200, out_features=2, bias=True)
+)
+81802
+
+
+
torch.Size([16, 2])
+
+
+
+
m = LSTM(c_in, c_out, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
+print(m)
+print(count_parameters(m))
+m(xb).shape
+
+
LSTM(
+  (rnn): LSTM(3, 100, num_layers=2, batch_first=True, dropout=0.5, bidirectional=True)
+  (dropout): Dropout(p=0.5, inplace=False)
+  (fc): Linear(in_features=200, out_features=2, bias=True)
+)
+326002
+
+
+
torch.Size([16, 2])
+
+
+
+
m = GRU(c_in, c_out, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
+print(m)
+print(count_parameters(m))
+m(xb).shape
+
+
GRU(
+  (rnn): GRU(3, 100, num_layers=2, batch_first=True, dropout=0.5, bidirectional=True)
+  (dropout): Dropout(p=0.5, inplace=False)
+  (fc): Linear(in_features=200, out_features=2, bias=True)
+)
+244602
+
+
+
torch.Size([16, 2])
+
+
+
+
+

Converting a model to TorchScript

+
+
model = LSTM(c_in, c_out, hidden_size=100, n_layers=2, bidirectional=True, rnn_dropout=.5, fc_dropout=.5)
+model.eval()
+inp = torch.rand(1, c_in, 50)
+output = model(inp)
+print(output)
+
+
tensor([[-0.0287, -0.0105]], grad_fn=<AddmmBackward0>)
+
+
+
+

Tracing

+
+
# save to gpu, cpu or both
+traced_cpu = torch.jit.trace(model.cpu(), inp)
+print(traced_cpu)
+torch.jit.save(traced_cpu, "cpu.pt")
+
+# load cpu or gpu model
+traced_cpu = torch.jit.load("cpu.pt")
+test_eq(traced_cpu(inp), output)
+
+!rm "cpu.pt"
+
+
LSTM(
+  original_name=LSTM
+  (rnn): LSTM(original_name=LSTM)
+  (dropout): Dropout(original_name=Dropout)
+  (fc): Linear(original_name=Linear)
+)
+
+
+
+
+

Scripting

+
+
# save to gpu, cpu or both
+scripted_cpu = torch.jit.script(model.cpu())
+print(scripted_cpu)
+torch.jit.save(scripted_cpu, "cpu.pt")
+
+# load cpu or gpu model
+scripted_cpu = torch.jit.load("cpu.pt")
+test_eq(scripted_cpu(inp), output)
+
+!rm "cpu.pt"
+
+
RecursiveScriptModule(
+  original_name=LSTM
+  (rnn): RecursiveScriptModule(original_name=LSTM)
+  (dropout): RecursiveScriptModule(original_name=Dropout)
+  (fc): RecursiveScriptModule(original_name=Linear)
+)
+
+
+
+
+
+

Converting a model to ONNX

+
import onnx
+
+# Export the model
+torch.onnx.export(model.cpu(),               # model being run
+                  inp,                       # model input (or a tuple for multiple inputs)
+                  "cpu.onnx",                # where to save the model (can be a file or file-like object)
+                  export_params=True,        # store the trained parameter weights inside the model file
+                  verbose=False,
+                  opset_version=13,          # the ONNX version to export the model to
+                  do_constant_folding=True,  # whether to execute constant folding for optimization
+                  input_names = ['input'],   # the model's input names
+                  output_names = ['output'], # the model's output names
+                  dynamic_axes={
+                      'input'  : {0 : 'batch_size'}, 
+                      'output' : {0 : 'batch_size'}} # variable length axes
+                 )
+
+# Load the model and check it's ok
+onnx_model = onnx.load("cpu.onnx")
+onnx.checker.check_model(onnx_model)
+
+# You can ignore the WARNINGS below
+
import onnxruntime as ort
+
+ort_sess = ort.InferenceSession('cpu.onnx')
+out = ort_sess.run(None, {'input': inp.numpy()})
+
+# input & output names
+input_name = ort_sess.get_inputs()[0].name
+output_name = ort_sess.get_outputs()[0].name
+
+# input dimensions
+input_dims = ort_sess.get_inputs()[0].shape
+print(input_name, output_name, input_dims)
+
+test_close(out, output.detach().numpy())
+!rm "cpu.onnx"
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/models.rnn_fcn.html b/models.rnn_fcn.html
new file mode 100644
index 000000000..1466e4dc6
--- /dev/null
+++ b/models.rnn_fcn.html
@@ -0,0 +1,1380 @@
+tsai - RNN_FCN
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

RNN_FCN

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+

source

+
+

MGRU_FCN

+
+
 MGRU_FCN (*args, se=16, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

MLSTM_FCN

+
+
 MLSTM_FCN (*args, se=16, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

MRNN_FCN

+
+
 MRNN_FCN (*args, se=16, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

GRU_FCN

+
+
 GRU_FCN (c_in, c_out, seq_len=None, hidden_size=100, rnn_layers=1,
+          bias=True, cell_dropout=0, rnn_dropout=0.8, bidirectional=False,
+          shuffle=True, fc_dropout=0.0, conv_layers=[128, 256, 128],
+          kss=[7, 5, 3], se=0)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

LSTM_FCN

+
+
 LSTM_FCN (c_in, c_out, seq_len=None, hidden_size=100, rnn_layers=1,
+           bias=True, cell_dropout=0, rnn_dropout=0.8,
+           bidirectional=False, shuffle=True, fc_dropout=0.0,
+           conv_layers=[128, 256, 128], kss=[7, 5, 3], se=0)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

RNN_FCN

+
+
 RNN_FCN (c_in, c_out, seq_len=None, hidden_size=100, rnn_layers=1,
+          bias=True, cell_dropout=0, rnn_dropout=0.8, bidirectional=False,
+          shuffle=True, fc_dropout=0.0, conv_layers=[128, 256, 128],
+          kss=[7, 5, 3], se=0)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 16
+n_vars = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+test_eq(RNN_FCN(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])
+test_eq(LSTM_FCN(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])
+test_eq(MLSTM_FCN(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])
+test_eq(GRU_FCN(n_vars, c_out, shuffle=False)(xb).shape, [bs, c_out])
+test_eq(GRU_FCN(n_vars, c_out, seq_len, shuffle=False)(xb).shape, [bs, c_out])
+
+
+
LSTM_FCN(n_vars, seq_len, c_out, se=8)
+
+
LSTM_FCN(
+  (rnn): LSTM(2, 100, batch_first=True)
+  (rnn_dropout): Dropout(p=0.8, inplace=False)
+  (convblock1): ConvBlock(
+    (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
+    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (se1): SqueezeExciteBlock(
+    (avg_pool): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Flatten(full=False)
+    )
+    (fc): Sequential(
+      (0): Linear(in_features=128, out_features=16, bias=False)
+      (1): ReLU()
+      (2): Linear(in_features=16, out_features=128, bias=False)
+      (3): Sigmoid()
+    )
+  )
+  (convblock2): ConvBlock(
+    (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
+    (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (se2): SqueezeExciteBlock(
+    (avg_pool): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Flatten(full=False)
+    )
+    (fc): Sequential(
+      (0): Linear(in_features=256, out_features=32, bias=False)
+      (1): ReLU()
+      (2): Linear(in_features=32, out_features=256, bias=False)
+      (3): Sigmoid()
+    )
+  )
+  (convblock3): ConvBlock(
+    (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
+    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (gap): GAP1d(
+    (gap): AdaptiveAvgPool1d(output_size=1)
+    (flatten): Flatten(full=False)
+  )
+  (concat): Concat(dim=1)
+  (fc): Linear(in_features=228, out_features=12, bias=True)
+)
+
+
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/models.rnn_fcnplus.html b/models.rnn_fcnplus.html
new file mode 100644
index 000000000..d35ba6a94
--- /dev/null
+++ b/models.rnn_fcnplus.html
@@ -0,0 +1,1569 @@
+tsai - RNN_FCNPlus
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

RNN_FCNPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

This is an unofficial PyTorch implementation by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+

source

+
+

MGRU_FCNPlus

+
+
 MGRU_FCNPlus (*args, se=16, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

MLSTM_FCNPlus

+
+
 MLSTM_FCNPlus (*args, se=16, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

MRNN_FCNPlus

+
+
 MRNN_FCNPlus (*args, se=16, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

GRU_FCNPlus

+
+
 GRU_FCNPlus (c_in, c_out, seq_len=None, d=None, hidden_size=100,
+              rnn_layers=1, bias=True, cell_dropout=0, rnn_dropout=0.8,
+              bidirectional=False, shuffle=True, fc_dropout=0.0,
+              use_bn=False, conv_layers=[128, 256, 128], kss=[7, 5, 3],
+              se=0, custom_head=None)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

LSTM_FCNPlus

+
+
 LSTM_FCNPlus (c_in, c_out, seq_len=None, d=None, hidden_size=100,
+               rnn_layers=1, bias=True, cell_dropout=0, rnn_dropout=0.8,
+               bidirectional=False, shuffle=True, fc_dropout=0.0,
+               use_bn=False, conv_layers=[128, 256, 128], kss=[7, 5, 3],
+               se=0, custom_head=None)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

RNN_FCNPlus

+
+
 RNN_FCNPlus (c_in, c_out, seq_len=None, d=None, hidden_size=100,
+              rnn_layers=1, bias=True, cell_dropout=0, rnn_dropout=0.8,
+              bidirectional=False, shuffle=True, fc_dropout=0.0,
+              use_bn=False, conv_layers=[128, 256, 128], kss=[7, 5, 3],
+              se=0, custom_head=None)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
from tsai.models.utils import count_parameters
+from tsai.models.RNN_FCN import *
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+test_eq(RNN_FCNPlus(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])
+test_eq(LSTM_FCNPlus(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])
+test_eq(MLSTM_FCNPlus(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])
+test_eq(GRU_FCNPlus(n_vars, c_out, shuffle=False)(xb).shape, [bs, c_out])
+test_eq(GRU_FCNPlus(n_vars, c_out, seq_len, shuffle=False)(xb).shape, [bs, c_out])
+test_eq(count_parameters(LSTM_FCNPlus(n_vars, c_out, seq_len)), count_parameters(LSTM_FCN(n_vars, c_out, seq_len)))
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
custom_head = nn.Linear(228, c_out)  # 228 = 128 (conv features after GAP) + 100 (rnn hidden_size), as in the model repr below
+test_eq(RNN_FCNPlus(n_vars, c_out, seq_len, custom_head=custom_head)(xb).shape, [bs, c_out])
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+d = 10
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+test_eq(RNN_FCNPlus(n_vars, c_out, seq_len, d=d)(xb).shape, [bs, d, c_out])
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+d = (5, 3)
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+test_eq(RNN_FCNPlus(n_vars, c_out, seq_len, d=d)(xb).shape, [bs, *d, c_out])
+
+
+
LSTM_FCNPlus(n_vars, seq_len, c_out, se=8)
+
+
LSTM_FCNPlus(
+  (backbone): _RNN_FCN_Base_Backbone(
+    (rnn): LSTM(2, 100, batch_first=True)
+    (rnn_dropout): Dropout(p=0.8, inplace=False)
+    (convblock1): ConvBlock(
+      (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (se1): SqueezeExciteBlock(
+      (avg_pool): GAP1d(
+        (gap): AdaptiveAvgPool1d(output_size=1)
+        (flatten): Reshape(bs)
+      )
+      (fc): Sequential(
+        (0): Linear(in_features=128, out_features=16, bias=False)
+        (1): ReLU()
+        (2): Linear(in_features=16, out_features=128, bias=False)
+        (3): Sigmoid()
+      )
+    )
+    (convblock2): ConvBlock(
+      (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
+      (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (se2): SqueezeExciteBlock(
+      (avg_pool): GAP1d(
+        (gap): AdaptiveAvgPool1d(output_size=1)
+        (flatten): Reshape(bs)
+      )
+      (fc): Sequential(
+        (0): Linear(in_features=256, out_features=32, bias=False)
+        (1): ReLU()
+        (2): Linear(in_features=32, out_features=256, bias=False)
+        (3): Sigmoid()
+      )
+    )
+    (convblock3): ConvBlock(
+      (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (gap): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Reshape(bs)
+    )
+    (concat): Concat(dim=1)
+  )
+  (head): Sequential(
+    (0): Linear(in_features=228, out_features=12, bias=True)
+  )
+)
+
+
+ + +
+ +
+ +
\ No newline at end of file
diff --git a/models.rnnattention.html b/models.rnnattention.html
new file mode 100644
index 000000000..71c0fc5d6
--- /dev/null
+++ b/models.rnnattention.html
@@ -0,0 +1,1416 @@
+tsai - RNNAttention
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

RNNAttention

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is a custom PyTorch implementation by @yangtzech, based on the TST implementation by Ignacio Oguiza.

+
+

Arguments

+

Usual values are the ones that appear in the “Attention is all you need” and “A Transformer-based Framework for Multivariate Time Series Representation Learning” papers. Some additional parameters are required for the RNN part.

+

The default values are the ones selected as a default configuration in the latter.

+
  • c_in: the number of features (aka variables, dimensions, channels) in the time series dataset. dls.vars
  • c_out: the number of target classes. dls.c
  • seq_len: number of time steps in the time series. dls.len
  • hidden_size: the number of features in the hidden state of the RNN model. Default: 128.
  • rnn_layers: the number of recurrent layers of the RNN model. Default: 1.
  • bias: if False, the layer does not use bias weights b_ih and b_hh. Default: True
  • rnn_dropout: if non-zero, introduces a Dropout layer on the outputs of each RNN layer except the last layer, with dropout probability equal to rnn_dropout. Default: 0
  • bidirectional: if True, becomes a bidirectional RNN. Default: False
  • n_heads: parallel attention heads. Usual values: 8-16. Default: 16.
  • d_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.
  • d_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.
  • d_ff: the dimension of the feedforward network model. Usual values: 256-4096. Default: 256.
  • encoder_dropout: amount of residual dropout applied in the encoder. Usual values: 0.-0.3. Default: 0.1.
  • act: the activation function of the intermediate layer, relu or gelu. Default: ‘gelu’.
  • encoder_layers: the number of sub-encoder layers in the encoder. Usual values: 2-8. Default: 3.
  • fc_dropout: dropout applied to the final fully connected layer. Usual values: 0.-0.8. Default: 0.
  • y_range: range of possible y values (used in regression tasks). Default: None
  • kwargs: nn.Conv1d kwargs. If not {}, a nn.Conv1d with those kwargs will be applied to the original time series.
+
+
+

Imports

+
+
+

RNNAttention

+
+
t = torch.rand(16, 50, 128)
+output, attn = _MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t)
+output.shape, attn.shape
+
+
(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))
+
+
+
+
t = torch.rand(16, 50, 128)
+output = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=3, d_k=None, d_v=None, d_ff=512, dropout=0.1, activation='gelu')(t)
+output.shape
+
+
torch.Size([16, 50, 128])
+
+
+
+

source

+
+

GRUAttention

+
+
 GRUAttention (c_in:int, c_out:int, seq_len:int, hidden_size=128,
+               rnn_layers=1, bias=True, rnn_dropout=0,
+               bidirectional=False, encoder_layers:int=3, n_heads:int=16,
+               d_k:Optional[int]=None, d_v:Optional[int]=None,
+               d_ff:int=256, encoder_dropout:float=0.1, act:str='gelu',
+               fc_dropout:float=0.0, y_range:Optional[tuple]=None,
+               verbose:bool=False, custom_head=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

LSTMAttention

+
+
 LSTMAttention (c_in:int, c_out:int, seq_len:int, hidden_size=128,
+                rnn_layers=1, bias=True, rnn_dropout=0,
+                bidirectional=False, encoder_layers:int=3, n_heads:int=16,
+                d_k:Optional[int]=None, d_v:Optional[int]=None,
+                d_ff:int=256, encoder_dropout:float=0.1, act:str='gelu',
+                fc_dropout:float=0.0, y_range:Optional[tuple]=None,
+                verbose:bool=False, custom_head=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

RNNAttention

+
+
 RNNAttention (c_in:int, c_out:int, seq_len:int, hidden_size=128,
+               rnn_layers=1, bias=True, rnn_dropout=0,
+               bidirectional=False, encoder_layers:int=3, n_heads:int=16,
+               d_k:Optional[int]=None, d_v:Optional[int]=None,
+               d_ff:int=256, encoder_dropout:float=0.1, act:str='gelu',
+               fc_dropout:float=0.0, y_range:Optional[tuple]=None,
+               verbose:bool=False, custom_head=None)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 500
+
+xb = torch.randn(bs, c_in, seq_len)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+# Settings
+hidden_size = 128
+rnn_layers=1
+bias=True
+rnn_dropout=0
+bidirectional=False
+encoder_layers=3
+n_heads = 16
+d_k = d_v = None # if None --> d_model // n_heads
+d_ff = 256
+encoder_dropout = 0.1
+act = "gelu"
+fc_dropout = 0.1
+kwargs = {}
+
+model = RNNAttention(c_in, c_out, seq_len, hidden_size=hidden_size, rnn_layers=rnn_layers, bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional,
+            encoder_layers=encoder_layers, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, encoder_dropout=encoder_dropout, act=act, 
+            fc_dropout=fc_dropout, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 541698
+
+
+
+
bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 60
+
+xb = torch.randn(bs, c_in, seq_len)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+# Settings
+hidden_size = 128
+rnn_layers=1
+bias=True
+rnn_dropout=0
+bidirectional=False
+encoder_layers=3
+n_heads = 16
+d_k = d_v = None # if None --> d_model // n_heads
+d_ff = 256
+encoder_dropout = 0.1
+act = "gelu"
+fc_dropout = 0.1
+kwargs = {}
+# kwargs = dict(kernel_size=5, padding=2)
+
+model = RNNAttention(c_in, c_out, seq_len, hidden_size=hidden_size, rnn_layers=rnn_layers, bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional,
+            encoder_layers=encoder_layers, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, encoder_dropout=encoder_dropout, act=act, 
+            fc_dropout=fc_dropout, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 429058
+
+
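The LSTM and GRU variants share the same interface. A quick sketch (not part of the original notebook; it assumes the imports used throughout this page are loaded):

bs, c_in, c_out, seq_len = 16, 3, 2, 60
xb = torch.randn(bs, c_in, seq_len)
print(LSTMAttention(c_in, c_out, seq_len)(xb).shape)  # expected: torch.Size([16, 2])
print(GRUAttention(c_in, c_out, seq_len)(xb).shape)   # expected: torch.Size([16, 2])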
+ + +
+
+ +
+ +
\ No newline at end of file
diff --git a/models.rnnattentionplus.html b/models.rnnattentionplus.html
new file mode 100644
index 000000000..987ee8216
--- /dev/null
+++ b/models.rnnattentionplus.html
@@ -0,0 +1,1958 @@
+tsai - RNNAttentionPlus
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

RNNAttentionPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is a custom PyTorch implementation by @yangtzech, based on the TST implementation by Ignacio Oguiza.

+
+

Arguments

+

Usual values are the ones that appear in the “Attention is all you need” and “A Transformer-based Framework for Multivariate Time Series Representation Learning” papers. Some additional parameters are required for the RNN part.

+

The default values are the ones selected as a default configuration in the latter.

+
  • c_in: the number of features (aka variables, dimensions, channels) in the time series dataset. dls.vars
  • c_out: the number of target classes. dls.c
  • seq_len: number of time steps in the time series. dls.len
  • hidden_size: the number of features in the hidden state of the RNN model. Default: 128.
  • rnn_layers: the number of recurrent layers of the RNN model. Default: 1.
  • bias: if False, the layer does not use bias weights b_ih and b_hh. Default: True
  • rnn_dropout: if non-zero, introduces a Dropout layer on the outputs of each RNN layer except the last layer, with dropout probability equal to rnn_dropout. Default: 0
  • bidirectional: if True, becomes a bidirectional RNN. Default: False
  • n_heads: parallel attention heads. Usual values: 8-16. Default: 16.
  • d_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.
  • d_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.
  • d_ff: the dimension of the feedforward network model. Usual values: 256-4096. Default: 256.
  • encoder_dropout: amount of residual dropout applied in the encoder. Usual values: 0.-0.3. Default: 0.1.
  • act: the activation function of the intermediate layer, relu or gelu. Default: ‘gelu’.
  • encoder_layers: the number of sub-encoder layers in the encoder. Usual values: 2-8. Default: 3.
  • fc_dropout: dropout applied to the final fully connected layer. Usual values: 0.-0.8. Default: 0.
  • y_range: range of possible y values (used in regression tasks). Default: None
+
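A minimal usage sketch for these arguments (an illustrative example only: it assumes a `dls` TSDataLoaders object created with `get_ts_dls`, as in other pages of this documentation, so that `dls.vars`, `dls.c` and `dls.len` are available):

model = RNNAttentionPlus(dls.vars, dls.c, dls.len, hidden_size=128, rnn_layers=1,
                         n_heads=16, d_ff=256, encoder_dropout=0.1, fc_dropout=0.1)
xb, yb = dls.train.one_batch()
assert model.to(xb.device)(xb).shape == (xb.shape[0], dls.c)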
+
+

Imports

+
+
+

RNNAttentionPlus

+
+
t = torch.rand(16, 50, 128)
+output, attn = _MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t)
+output.shape, attn.shape
+
+
(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))
+
+
+
+
t = torch.rand(16, 50, 128)
+output = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=3, d_k=None, d_v=None, d_ff=512, dropout=0.1, activation='gelu')(t)
+output.shape
+
+
torch.Size([16, 50, 128])
+
+
+
+

source

+
+

GRUAttentionPlus

+
+
 GRUAttentionPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,
+                   hidden_size:int=128, rnn_layers:int=1, bias:bool=True,
+                   rnn_dropout:float=0, bidirectional=False,
+                   encoder_layers:int=3, n_heads:int=16,
+                   d_k:Optional[int]=None, d_v:Optional[int]=None,
+                   d_ff:int=256, encoder_dropout:float=0.1,
+                   act:str='gelu', fc_dropout:float=0.0,
+                   y_range:Optional[tuple]=None, custom_head=None,
+                   use_bn:bool=True, flatten:bool=True)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a torch.nn.ModuleList? A ModuleList is exactly what it sounds like: a list for storing Modules! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
Argument | Type | Default | Details
c_in | int |  | the number of features (aka variables, dimensions, channels) in the time series dataset.
c_out | int |  | the number of target classes.
seq_len | int |  | number of time steps in the time series.
d | tuple | None | output shape (excluding batch dimension).
hidden_size | int | 128 | the number of features in the hidden state h
rnn_layers | int | 1 | the number of recurrent layers of the RNN model.
bias | bool | True | If False, then the layer does not use bias weights b_ih and b_hh.
rnn_dropout | float | 0 | rnn dropout applied to the output of each RNN layer except the last layer.
bidirectional | bool | False | If True, becomes a bidirectional RNN.
encoder_layers | int | 3 | the number of sub-encoder-layers in the encoder.
n_heads | int | 16 | parallel attention heads.
d_k | typing.Optional[int] | None | size of the learned linear projection of queries and keys in the MHA.
d_v | typing.Optional[int] | None | size of the learned linear projection of values in the MHA.
d_ff | int | 256 | the dimension of the feedforward network model.
encoder_dropout | float | 0.1 | amount of residual dropout applied in the encoder.
act | str | gelu | the activation function of intermediate layer, relu or gelu.
fc_dropout | float | 0.0 | dropout applied to the final fully connected layer.
y_range | typing.Optional[tuple] | None | range of possible y values (used in regression tasks).
custom_head | NoneType | None | custom head that will be applied to the model head (optional).
use_bn | bool | True | indicates if batchnorm will be applied to the model head.
flatten | bool | True | this will flatten the output of the encoder before applying the head if True.
+
+

source

+
+
+

LSTMAttentionPlus

+
+
 LSTMAttentionPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,
+                    hidden_size:int=128, rnn_layers:int=1, bias:bool=True,
+                    rnn_dropout:float=0, bidirectional=False,
+                    encoder_layers:int=3, n_heads:int=16,
+                    d_k:Optional[int]=None, d_v:Optional[int]=None,
+                    d_ff:int=256, encoder_dropout:float=0.1,
+                    act:str='gelu', fc_dropout:float=0.0,
+                    y_range:Optional[tuple]=None, custom_head=None,
+                    use_bn:bool=True, flatten:bool=True)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a torch.nn.ModuleList? A ModuleList is exactly what it sounds like: a list for storing Modules! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
Argument | Type | Default | Details
c_in | int |  | the number of features (aka variables, dimensions, channels) in the time series dataset.
c_out | int |  | the number of target classes.
seq_len | int |  | number of time steps in the time series.
d | tuple | None | output shape (excluding batch dimension).
hidden_size | int | 128 | the number of features in the hidden state h
rnn_layers | int | 1 | the number of recurrent layers of the RNN model.
bias | bool | True | If False, then the layer does not use bias weights b_ih and b_hh.
rnn_dropout | float | 0 | rnn dropout applied to the output of each RNN layer except the last layer.
bidirectional | bool | False | If True, becomes a bidirectional RNN.
encoder_layers | int | 3 | the number of sub-encoder-layers in the encoder.
n_heads | int | 16 | parallel attention heads.
d_k | typing.Optional[int] | None | size of the learned linear projection of queries and keys in the MHA.
d_v | typing.Optional[int] | None | size of the learned linear projection of values in the MHA.
d_ff | int | 256 | the dimension of the feedforward network model.
encoder_dropout | float | 0.1 | amount of residual dropout applied in the encoder.
act | str | gelu | the activation function of intermediate layer, relu or gelu.
fc_dropout | float | 0.0 | dropout applied to the final fully connected layer.
y_range | typing.Optional[tuple] | None | range of possible y values (used in regression tasks).
custom_head | NoneType | None | custom head that will be applied to the model head (optional).
use_bn | bool | True | indicates if batchnorm will be applied to the model head.
flatten | bool | True | this will flatten the output of the encoder before applying the head if True.
+
+

source

+
+
+

RNNAttentionPlus

+
+
 RNNAttentionPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,
+                   hidden_size:int=128, rnn_layers:int=1, bias:bool=True,
+                   rnn_dropout:float=0, bidirectional=False,
+                   encoder_layers:int=3, n_heads:int=16,
+                   d_k:Optional[int]=None, d_v:Optional[int]=None,
+                   d_ff:int=256, encoder_dropout:float=0.1,
+                   act:str='gelu', fc_dropout:float=0.0,
+                   y_range:Optional[tuple]=None, custom_head=None,
+                   use_bn:bool=True, flatten:bool=True)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a torch.nn.ModuleList? A ModuleList is exactly what it sounds like: a list for storing Modules! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
Argument | Type | Default | Details
c_in | int |  | the number of features (aka variables, dimensions, channels) in the time series dataset.
c_out | int |  | the number of target classes.
seq_len | int |  | number of time steps in the time series.
d | tuple | None | output shape (excluding batch dimension).
hidden_size | int | 128 | the number of features in the hidden state h
rnn_layers | int | 1 | the number of recurrent layers of the RNN model.
bias | bool | True | If False, then the layer does not use bias weights b_ih and b_hh.
rnn_dropout | float | 0 | rnn dropout applied to the output of each RNN layer except the last layer.
bidirectional | bool | False | If True, becomes a bidirectional RNN.
encoder_layers | int | 3 | the number of sub-encoder-layers in the encoder.
n_heads | int | 16 | parallel attention heads.
d_k | typing.Optional[int] | None | size of the learned linear projection of queries and keys in the MHA.
d_v | typing.Optional[int] | None | size of the learned linear projection of values in the MHA.
d_ff | int | 256 | the dimension of the feedforward network model.
encoder_dropout | float | 0.1 | amount of residual dropout applied in the encoder.
act | str | gelu | the activation function of intermediate layer, relu or gelu.
fc_dropout | float | 0.0 | dropout applied to the final fully connected layer.
y_range | typing.Optional[tuple] | None | range of possible y values (used in regression tasks).
custom_head | NoneType | None | custom head that will be applied to the model head (optional).
use_bn | bool | True | indicates if batchnorm will be applied to the model head.
flatten | bool | True | this will flatten the output of the encoder before applying the head if True.
+
+
bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 500
+
+xb = torch.randn(bs, c_in, seq_len)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+# Settings
+hidden_size = 128
+rnn_layers=1
+bias=True
+rnn_dropout=0
+bidirectional=False
+encoder_layers=3
+n_heads = 16
+d_k = d_v = None # if None --> d_model // n_heads
+d_ff = 256
+encoder_dropout = 0.1
+act = "gelu"
+fc_dropout = 0.1
+kwargs = {}
+
+model = RNNAttentionPlus(c_in, c_out, seq_len, hidden_size=hidden_size, rnn_layers=rnn_layers, bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional,
+            encoder_layers=encoder_layers, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, encoder_dropout=encoder_dropout, act=act, 
+            fc_dropout=fc_dropout, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 541698
+
+
+
+
bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 60
+
+xb = torch.randn(bs, c_in, seq_len)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+# Settings
+hidden_size = 128
+rnn_layers=1
+bias=True
+rnn_dropout=0
+bidirectional=False
+encoder_layers=3
+n_heads = 16
+d_k = d_v = None # if None --> d_model // n_heads
+d_ff = 256
+encoder_dropout = 0.1
+act = "gelu"
+fc_dropout = 0.1
+kwargs = {}
+# kwargs = dict(kernel_size=5, padding=2)
+
+model = RNNAttentionPlus(c_in, c_out, seq_len, hidden_size=hidden_size, rnn_layers=rnn_layers, bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional,
+            encoder_layers=encoder_layers, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, encoder_dropout=encoder_dropout, act=act, 
+            fc_dropout=fc_dropout, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 429058
+
+
+
+
bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 60
+d = 10
+
+xb = torch.randn(bs, c_in, seq_len)
+model = RNNAttentionPlus(c_in, c_out, seq_len, d=d)
+test_eq(model.to(xb.device)(xb).shape, [bs, d, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 567572
+
+
+
+
bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 60
+d = (3, 10)
+
+xb = torch.randn(bs, c_in, seq_len)
+model = RNNAttentionPlus(c_in, c_out, seq_len, d=d)
+test_eq(model.to(xb.device)(xb).shape, [bs, *d, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 874812
+
+
+ + +
+
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.rnnplus.html b/models.rnnplus.html new file mode 100644 index 000000000..b1c058c2a --- /dev/null +++ b/models.rnnplus.html @@ -0,0 +1,1728 @@ + + + + + + + + + +tsai - RNNPlus + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

RNNPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +

These are RNN, LSTM and GRU PyTorch implementations created by Ignacio Oguiza - oguiza@timeseriesAI.co

+

The idea of adding a feature extractor to the RNN network comes from the solution developed by the UPSTAGE team (https://www.kaggle.com/songwonho, https://www.kaggle.com/limerobot and https://www.kaggle.com/jungikhyo). They finished in 3rd position in Kaggle’s Google Brain - Ventilator Pressure Prediction competition. They used a Conv1d + Stacked LSTM architecture (a minimal sketch is shown below).

+
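A minimal sketch of that idea using the `feature_extractor` argument (illustrative assumptions only: the kernel sizes and hidden sizes are arbitrary; `MultiConv1d` is the convolutional front-end used later on this page, not the exact module used by the UPSTAGE team):

bs, c_in, seq_len, c_out = 16, 3, 12, 2
xb = torch.rand(bs, c_in, seq_len)
feature_extractor = MultiConv1d(c_in, kss=[1, 3, 5, 7])   # Conv1d front-end
model = LSTMPlus(c_in, c_out, seq_len, hidden_size=[128, 64], feature_extractor=feature_extractor)
assert model(xb).shape == (bs, c_out)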
+

source

+
+

GRUPlus

+
+
 GRUPlus (c_in, c_out, seq_len=None, hidden_size=[100], n_layers=1,
+          bias=True, rnn_dropout=0, bidirectional=False,
+          n_cat_embeds=None, cat_embed_dims=None, cat_padding_idxs=None,
+          cat_pos=None, feature_extractor=None, fc_dropout=0.0,
+          last_step=True, bn=False, custom_head=None, y_range=None,
+          init_weights=True, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a torch.nn.ModuleList? A ModuleList is exactly what it sounds like: a list for storing Modules! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

LSTMPlus

+
+
 LSTMPlus (c_in, c_out, seq_len=None, hidden_size=[100], n_layers=1,
+           bias=True, rnn_dropout=0, bidirectional=False,
+           n_cat_embeds=None, cat_embed_dims=None, cat_padding_idxs=None,
+           cat_pos=None, feature_extractor=None, fc_dropout=0.0,
+           last_step=True, bn=False, custom_head=None, y_range=None,
+           init_weights=True, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a torch.nn.ModuleList? A ModuleList is exactly what it sounds like: a list for storing Modules! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

RNNPlus

+
+
 RNNPlus (c_in, c_out, seq_len=None, hidden_size=[100], n_layers=1,
+          bias=True, rnn_dropout=0, bidirectional=False,
+          n_cat_embeds=None, cat_embed_dims=None, cat_padding_idxs=None,
+          cat_pos=None, feature_extractor=None, fc_dropout=0.0,
+          last_step=True, bn=False, custom_head=None, y_range=None,
+          init_weights=True, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a torch.nn.ModuleList? A ModuleList is exactly what it sounds like: a list for storing Modules! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
bs = 16
+c_in = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, c_in, seq_len)
+test_eq(RNNPlus(c_in, c_out)(xb).shape, [bs, c_out])
+test_eq(RNNPlus(c_in, c_out, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, 
+        [bs, c_out])
+test_eq(RNNPlus(c_in, c_out, hidden_size=[100, 50, 10], bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, 
+        [bs, c_out])
+test_eq(RNNPlus(c_in, c_out, hidden_size=[100], n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, 
+        [bs, c_out])
+test_eq(LSTMPlus(c_in, c_out, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, 
+        [bs, c_out])
+test_eq(GRUPlus(c_in, c_out, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5)(xb).shape, 
+        [bs, c_out])
+test_eq(RNNPlus(c_in, c_out, seq_len, last_step=False)(xb).shape, [bs, c_out])
+test_eq(RNNPlus(c_in, c_out, seq_len, last_step=False)(xb).shape, [bs, c_out])
+test_eq(RNNPlus(c_in, c_out, seq_len, hidden_size=100, n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, fc_dropout=0.5, 
+                last_step=False)(xb).shape, 
+        [bs, c_out])
+test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False)(xb).shape, [bs, c_out])
+test_eq(GRUPlus(c_in, c_out, seq_len, last_step=False)(xb).shape, [bs, c_out])
+
+
+
feature_extractor = MultiConv1d(c_in, kss=[1,3,5,7])
+custom_head = nn.Sequential(Transpose(1,2), nn.Linear(8,8), nn.SELU(), nn.Linear(8, 1), Squeeze())
+test_eq(LSTMPlus(c_in, c_out, seq_len, hidden_size=[32,16,8,4], bidirectional=True, 
+                 feature_extractor=feature_extractor, custom_head=custom_head)(xb).shape, [bs, seq_len])
+feature_extractor = MultiConv1d(c_in, kss=[1,3,5,7], keep_original=True)
+custom_head = nn.Sequential(Transpose(1,2), nn.Linear(8,8), nn.SELU(), nn.Linear(8, 1), Squeeze())
+test_eq(LSTMPlus(c_in, c_out, seq_len, hidden_size=[32,16,8,4], bidirectional=True, 
+                 feature_extractor=feature_extractor, custom_head=custom_head)(xb).shape, [bs, seq_len])
+
+
[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.
+
+
+
+
bs = 16
+c_in = 3
+seq_len = 12
+c_out = 2
+x1 = torch.rand(bs,1,seq_len)
+x2 = torch.randint(0,3,(bs,1,seq_len))
+x3 = torch.randint(0,5,(bs,1,seq_len))
+xb = torch.cat([x1,x2,x3],1)
+
+custom_head = partial(create_mlp_head, fc_dropout=0.5)
+test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False, custom_head=custom_head)(xb).shape, [bs, c_out])
+custom_head = partial(create_pool_head, concat_pool=True, fc_dropout=0.5)
+test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False, custom_head=custom_head)(xb).shape, [bs, c_out])
+custom_head = partial(create_pool_plus_head, fc_dropout=0.5)
+test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False, custom_head=custom_head)(xb).shape, [bs, c_out])
+custom_head = partial(create_conv_head)
+test_eq(LSTMPlus(c_in, c_out, seq_len, last_step=False, custom_head=custom_head)(xb).shape, [bs, c_out])
+test_eq(LSTMPlus(c_in, c_out, seq_len, hidden_size=[100, 50], n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True)(xb).shape, 
+        [bs, c_out])
+
+n_cat_embeds = [3, 5]
+cat_pos = [1, 2]
+custom_head = partial(create_conv_head)
+m = LSTMPlus(c_in, c_out, seq_len, hidden_size=[100, 50], n_layers=2, bias=True, rnn_dropout=0.2, bidirectional=True, 
+             n_cat_embeds=n_cat_embeds, cat_pos=cat_pos)
+test_eq(m(xb).shape, [bs, c_out])
+
+
+
from tsai.data.all import *
+from tsai.models.utils import *
+
+
+
dsid = 'NATOPS' 
+bs = 16
+X, y, splits = get_UCR_data(dsid, return_split=False)
+tfms  = [None, [Categorize()]]
+dls = get_ts_dls(X, y, tfms=tfms, splits=splits, bs=bs)
+
+
+
model = build_ts_model(LSTMPlus, dls=dls)
+print(model[-1])
+learn = Learner(dls, model,  metrics=accuracy)
+learn.fit_one_cycle(1, 3e-3)
+
+
Sequential(
+  (0): LastStep()
+  (1): Linear(in_features=100, out_features=6, bias=True)
+)
+
+
+ + +
+
+
+
model = LSTMPlus(dls.vars, dls.c, dls.len, last_step=False)
+learn = Learner(dls, model,  metrics=accuracy)
+learn.fit_one_cycle(1, 3e-3)
+
+ + +
+
+ +
+
+
+
custom_head = partial(create_pool_head, concat_pool=True)
+model = LSTMPlus(dls.vars, dls.c, dls.len, last_step=False, custom_head=custom_head)
+learn = Learner(dls, model,  metrics=accuracy)
+learn.fit_one_cycle(1, 3e-3)
+
+
+
custom_head = partial(create_pool_plus_head, concat_pool=True)
+model = LSTMPlus(dls.vars, dls.c, dls.len, last_step=False, custom_head=custom_head)
+learn = Learner(dls, model,  metrics=accuracy)
+learn.fit_one_cycle(1, 3e-3)
+
+ + +
+
+
+
m = RNNPlus(c_in, c_out, seq_len, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
+print(m)
+print(count_parameters(m))
+m(xb).shape
+
+
RNNPlus(
+  (backbone): _RNN_Backbone(
+    (to_cat_embed): Identity()
+    (feature_extractor): Identity()
+    (rnn): Sequential(
+      (0): RNN(3, 100, num_layers=2, batch_first=True, dropout=0.5, bidirectional=True)
+      (1): LSTMOutput()
+    )
+    (transpose): Transpose(dims=-1, -2).contiguous()
+  )
+  (head): Sequential(
+    (0): LastStep()
+    (1): Dropout(p=0.5, inplace=False)
+    (2): Linear(in_features=200, out_features=2, bias=True)
+  )
+)
+81802
+
+
+
torch.Size([16, 2])
+
+
+
+
m = LSTMPlus(c_in, c_out, seq_len, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
+print(m)
+print(count_parameters(m))
+m(xb).shape
+
+
LSTMPlus(
+  (backbone): _RNN_Backbone(
+    (to_cat_embed): Identity()
+    (feature_extractor): Identity()
+    (rnn): Sequential(
+      (0): LSTM(3, 100, num_layers=2, batch_first=True, dropout=0.5, bidirectional=True)
+      (1): LSTMOutput()
+    )
+    (transpose): Transpose(dims=-1, -2).contiguous()
+  )
+  (head): Sequential(
+    (0): LastStep()
+    (1): Dropout(p=0.5, inplace=False)
+    (2): Linear(in_features=200, out_features=2, bias=True)
+  )
+)
+326002
+
+
+
torch.Size([16, 2])
+
+
+
+
m = GRUPlus(c_in, c_out, seq_len, hidden_size=100,n_layers=2,bidirectional=True,rnn_dropout=.5,fc_dropout=.5)
+print(m)
+print(count_parameters(m))
+m(xb).shape
+
+
GRUPlus(
+  (backbone): _RNN_Backbone(
+    (to_cat_embed): Identity()
+    (feature_extractor): Identity()
+    (rnn): Sequential(
+      (0): GRU(3, 100, num_layers=2, batch_first=True, dropout=0.5, bidirectional=True)
+      (1): LSTMOutput()
+    )
+    (transpose): Transpose(dims=-1, -2).contiguous()
+  )
+  (head): Sequential(
+    (0): LastStep()
+    (1): Dropout(p=0.5, inplace=False)
+    (2): Linear(in_features=200, out_features=2, bias=True)
+  )
+)
+244602
+
+
+
torch.Size([16, 2])
+
+
+
+
+

Converting a model to TorchScript

+
+
model = GRUPlus(c_in, c_out, hidden_size=100, n_layers=2, bidirectional=True, rnn_dropout=.5, fc_dropout=.5)
+model.eval()
+inp = torch.rand(1, c_in, 50)
+output = model(inp)
+print(output)
+
+
tensor([[-0.0677, -0.0857]], grad_fn=<AddmmBackward0>)
+
+
+
+

Tracing

+
+
# save to gpu, cpu or both
+traced_cpu = torch.jit.trace(model.cpu(), inp)
+print(traced_cpu)
+torch.jit.save(traced_cpu, "cpu.pt")
+
+# load cpu or gpu model
+traced_cpu = torch.jit.load("cpu.pt")
+test_eq(traced_cpu(inp), output)
+
+!rm "cpu.pt"
+
+
GRUPlus(
+  original_name=GRUPlus
+  (backbone): _RNN_Backbone(
+    original_name=_RNN_Backbone
+    (to_cat_embed): Identity(original_name=Identity)
+    (feature_extractor): Identity(original_name=Identity)
+    (rnn): Sequential(
+      original_name=Sequential
+      (0): GRU(original_name=GRU)
+      (1): LSTMOutput(original_name=LSTMOutput)
+    )
+    (transpose): Transpose(original_name=Transpose)
+  )
+  (head): Sequential(
+    original_name=Sequential
+    (0): LastStep(original_name=LastStep)
+    (1): Dropout(original_name=Dropout)
+    (2): Linear(original_name=Linear)
+  )
+)
+
+
+
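The comments in the cell above mention that a GPU version can be saved as well. A minimal sketch (assumption: a CUDA device is available, which is why it is guarded and was not executed here):

if torch.cuda.is_available():
    inp_gpu = inp.cuda()
    traced_gpu = torch.jit.trace(model.cuda(), inp_gpu)   # trace on the GPU
    torch.jit.save(traced_gpu, "gpu.pt")
    traced_gpu = torch.jit.load("gpu.pt")
    # small numeric differences between devices are expected, so test_close is used
    test_close(traced_gpu(inp_gpu).cpu(), output, eps=1e-4)
    model = model.cpu()   # move the model back to cpu before the ONNX export below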
+
+
+

Converting a model to ONNX

+
import onnx
+
+torch.onnx.export(model.cpu(),               # model being run
+                  inp,                       # model input (or a tuple for multiple inputs)
+                  "cpu.onnx",                # where to save the model (can be a file or file-like object)
+                  export_params=True,        # store the trained parameter weights inside the model file
+                  verbose=False,
+                  opset_version=13,          # the ONNX version to export the model to
+                  do_constant_folding=True,  # whether to execute constant folding for optimization
+                  input_names = ['input'],   # the model's input names
+                  output_names = ['output'], # the model's output names
+                  dynamic_axes={
+                      'input'  : {0 : 'batch_size'}, 
+                      'output' : {0 : 'batch_size'}} # variable length axes
+                 )
+
+
+onnx_model = onnx.load("cpu.onnx")           # Load the model and check it's ok
+onnx.checker.check_model(onnx_model)
+
import onnxruntime as ort
+
+ort_sess = ort.InferenceSession('cpu.onnx')
+out = ort_sess.run(None, {'input': inp.numpy()})
+
+input_name = ort_sess.get_inputs()[0].name
+output_name = ort_sess.get_outputs()[0].name
+input_dims = ort_sess.get_inputs()[0].shape
+
+test_close(out, output.detach().numpy())
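# Extra illustrative check (assumption, not part of the original notebook): the dynamic
# 'batch_size' axes declared in the export above should let the same session accept
# a different batch size
new_inp = torch.rand(8, c_in, 50)
new_out = ort_sess.run(None, {'input': new_inp.numpy()})
test_eq(new_out[0].shape, (8, c_out))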
+!rm "cpu.onnx"
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.rocket.html b/models.rocket.html new file mode 100644 index 000000000..fa3abd70f --- /dev/null +++ b/models.rocket.html @@ -0,0 +1,1354 @@ + + + + + + + + + +tsai - ROCKET + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

ROCKET

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

ROCKET (RandOm Convolutional KErnel Transform) functions for univariate and multivariate time series.

+
+
+

source

+
+

RocketClassifier

+
+
 RocketClassifier (num_kernels=10000, normalize_input=True,
+                   random_state=None, alphas=array([1.e-03, 1.e-02,
+                   1.e-01, 1.e+00, 1.e+01, 1.e+02, 1.e+03]),
+                   normalize_features=True, memory=None, verbose=False,
+                   scoring=None, class_weight=None, **kwargs)
+
+

Time series classification using ROCKET features and a linear classifier

+
+

source

+
+
+

load_rocket

+
+
 load_rocket (fname='Rocket', path='./models')
+
+
+

source

+
+
+

RocketRegressor

+
+
 RocketRegressor (num_kernels=10000, normalize_input=True,
+                  random_state=None, alphas=array([1.e-03, 1.e-02, 1.e-01,
+                  1.e+00, 1.e+01, 1.e+02, 1.e+03]),
+                  normalize_features=True, memory=None, verbose=False,
+                  scoring=None, **kwargs)
+
+

Time series regression using ROCKET features and a linear regressor

+
+
# Univariate classification with sklearn-type API
+dsid = 'OliveOil'
+fname = 'RocketClassifier'
+X_train, y_train, X_test, y_test = get_UCR_data(dsid, Xdtype='float64')
+cls = RocketClassifier()
+cls.fit(X_train, y_train)
+cls.save(fname)
+del cls
+cls = load_rocket(fname)
+print(cls.score(X_test, y_test))
+
+
OMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.
+
+
+
0.9
+
+
+
+
# Multivariate classification with sklearn-type API
+dsid = 'NATOPS'
+fname = 'RocketClassifier'
+X_train, y_train, X_test, y_test = get_UCR_data(dsid, Xdtype='float64')
+cls = RocketClassifier()
+cls.fit(X_train, y_train)
+cls.save(fname)
+del cls
+cls = load_rocket(fname)
+print(cls.score(X_test, y_test))
+
+
0.8666666666666667
+
+
+
+
from sklearn.metrics import mean_squared_error
+
+
+
# Univariate regression with sklearn-type API
+dsid = 'Covid3Month'
+fname = 'RocketRegressor'
+X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid, Xdtype='float64')
+if X_train is not None: 
+    rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
+    reg = RocketRegressor(scoring=rmse_scorer)
+    reg.fit(X_train, y_train)
+    reg.save(fname)
+    del reg
+    reg = load_rocket(fname)
+    y_pred = reg.predict(X_test)
+    print(mean_squared_error(y_test, y_pred, squared=False))
+
+
0.03908714523468997
+
+
+
+
# Multivariate regression with sklearn-type API
+dsid = 'AppliancesEnergy'
+fname = 'RocketRegressor'
+X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid, Xdtype='float64')
+if X_train is not None: 
+    rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
+    reg = RocketRegressor(scoring=rmse_scorer)
+    reg.fit(X_train, y_train)
+    reg.save(fname)
+    del reg
+    reg = load_rocket(fname)
+    y_pred = reg.predict(X_test)
+    print(mean_squared_error(y_test, y_pred, squared=False))
+
+
2.287302226812576
+
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.rocket_pytorch.html b/models.rocket_pytorch.html new file mode 100644 index 000000000..3db0bb419 --- /dev/null +++ b/models.rocket_pytorch.html @@ -0,0 +1,1294 @@ + + + + + + + + + +tsai - ROCKET Pytorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

ROCKET Pytorch

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

ROCKET (RandOm Convolutional KErnel Transform) functions for univariate and multivariate time series developed in Pytorch.

+
+
+

source

+
+

ROCKET

+
+
 ROCKET (c_in, seq_len, n_kernels=10000, kss=[7, 9, 11], device=None,
+         verbose=False)
+
+

RandOm Convolutional KErnel Transform

+

ROCKET is a GPU PyTorch implementation of the ROCKET functions generate_kernels and apply_kernels that can be used with univariate and multivariate time series.

+
+

source

+
+
+

create_rocket_features

+
+
 create_rocket_features (dl, model, verbose=False)
+
+

Args: dl: a single TSDataLoader (for example dls.train or dls.valid). model: a ROCKET model instance.

+
+
bs = 16
+c_in = 7  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 15
+xb = torch.randn(bs, c_in, seq_len).to(default_device())
+
+m = ROCKET(c_in, seq_len, n_kernels=1_000, kss=[7, 9, 11]) # 1_000 for testing with a cpu. Default is 10k with a gpu!
+test_eq(m(xb).shape, [bs, 2_000])
+
+
+
from tsai.data.all import *
+from tsai.models.utils import *
+
+
+
X, y, splits = get_UCR_data('OliveOil', split_data=False)
+tfms = [None, TSRegression()]
+batch_tfms = TSStandardize(by_var=True)
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, shuffle_train=False, drop_last=False)
+model = build_ts_model(ROCKET, dls=dls, n_kernels=1_000) # 1_000 for testing with a cpu. Default is 10k with a gpu!
+X_train, y_train = create_rocket_features(dls.train, model) 
+X_valid, y_valid = create_rocket_features(dls.valid, model)
+X_train.shape, X_valid.shape
+
+
((30, 2000), (30, 2000))
+
+
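The features generated with create_rocket_features can then be fed to any classical estimator. A minimal sketch (assuming scikit-learn is installed; since the targets above were created with TSRegression and are floats, a ridge regressor is used here, whereas RidgeClassifierCV would be the usual choice for categorical targets):

from sklearn.linear_model import RidgeCV

ridge = RidgeCV(alphas=np.logspace(-3, 3, 7))   # same alpha grid as the RocketClassifier/RocketRegressor defaults
ridge.fit(X_train, y_train)
print(f'valid R^2: {ridge.score(X_valid, y_valid):.3f}')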
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.tabfusiontransformer.html b/models.tabfusiontransformer.html new file mode 100644 index 000000000..b53e99097 --- /dev/null +++ b/models.tabfusiontransformer.html @@ -0,0 +1,1378 @@ + + + + + + + + + +tsai - TabFusionTransformer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

TabFusionTransformer

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is a PyTorch implementation of TabFusionTransformer created by Ignacio Oguiza (oguiza@timeseriesAI.co).

+

This implementation is inspired by:

+

Huang, X., Khetan, A., Cvitkovic, M., & Karnin, Z. (2020). TabTransformer: Tabular Data Modeling Using Contextual Embeddings. arXiv preprint https://arxiv.org/pdf/2012.06678

+

Official repo: https://github.com/awslabs/autogluon/tree/master/tabular/src/autogluon/tabular/models/tab_transformer

+
+

source

+
+

TabFusionTransformer

+
+
 TabFusionTransformer (classes, cont_names, c_out, d_model=32, n_layers=6,
+                       n_heads=8, d_k=None, d_v=None, d_ff=None,
+                       res_attention=True, attention_act='gelu',
+                       res_dropout=0.0, fc_mults=(4, 2), fc_dropout=0.0,
+                       fc_act=None, fc_skip=False, fc_bn=False,
+                       bn_final=False, init=True)
+
+

Class that allows you to pass one or multiple inputs

+
+

source

+
+
+

TabFusionBackbone

+
+
 TabFusionBackbone (classes, cont_names, d_model=32, n_layers=6,
+                    n_heads=8, d_k=None, d_v=None, d_ff=None, init=True,
+                    res_attention=True, attention_act='gelu',
+                    res_dropout=0.0)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call .to(), etc.

+

Note: as per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

training (bool): indicates whether this module is in training or evaluation mode.

+
+

source

+
+
+

Sequential

+
+
 Sequential (*args)
+
+

Class that allows you to pass one or multiple inputs

+
+

source

+
+
+

ifnone

+
+
 ifnone (a, b)
+
+

b if a is None else a

+
+
from fastai.tabular.all import *
+
+
+
path = untar_data(URLs.ADULT_SAMPLE)
+df = pd.read_csv(path/'adult.csv')
+dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names="salary",
+    cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'],
+    cont_names = ['age', 'fnlwgt', 'education-num'],
+    procs = [Categorify, FillMissing, Normalize])
+x_cat, x_cont, yb = first(dls.train)
+model = TabFusionTransformer(dls.classes, dls.cont_names, dls.c)
+test_eq(model(x_cat, x_cont).shape, (dls.train.bs, dls.c))
+
+
+

source

+
+
+

TSTabFusionTransformer

+
+
 TSTabFusionTransformer (c_in, c_out, seq_len, classes, cont_names,
+                         d_model=32, n_layers=6, n_heads=8, d_k=None,
+                         d_v=None, d_ff=None, res_attention=True,
+                         attention_act='gelu', res_dropout=0.0,
+                         fc_mults=(1, 0.5), fc_dropout=0.0, fc_act=None,
+                         fc_skip=False, fc_bn=False, bn_final=False,
+                         init=True)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call .to(), etc.

+

Note: as per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

training (bool): indicates whether this module is in training or evaluation mode.

+
+
classes = {'education': ['#na#', '10th', '11th', '12th', '1st-4th', '5th-6th', '7th-8th', '9th', 'Assoc-acdm', 'Assoc-voc', 'Bachelors', 'Doctorate', 
+                         'HS-grad', 'Masters', 'Preschool', 'Prof-school', 'Some-college'],
+ 'education-num_na': ['#na#', False, True],
+ 'marital-status': ['#na#', 'Divorced', 'Married-AF-spouse', 'Married-civ-spouse', 'Married-spouse-absent', 'Never-married', 'Separated', 'Widowed'],
+ 'occupation': ['#na#', '?', 'Adm-clerical', 'Armed-Forces', 'Craft-repair', 'Exec-managerial', 'Farming-fishing', 'Handlers-cleaners', 'Machine-op-inspct', 
+                'Other-service', 'Priv-house-serv', 'Prof-specialty', 'Protective-serv', 'Sales', 'Tech-support', 'Transport-moving'],
+ 'race': ['#na#', 'Amer-Indian-Eskimo', 'Asian-Pac-Islander', 'Black', 'Other', 'White'],
+ 'relationship': ['#na#', 'Husband', 'Not-in-family', 'Other-relative', 'Own-child', 'Unmarried', 'Wife'],
+ 'workclass': ['#na#', '?', 'Federal-gov', 'Local-gov', 'Never-worked', 'Private', 'Self-emp-inc', 'Self-emp-not-inc', 'State-gov', 'Without-pay']}
+
+cont_names = ['a', 'b', 'c']
+c_out = 6
+x_ts = torch.randn(64, 3, 10)
+x_cat = torch.randint(0,3,(64,7))
+x_cont = torch.randn(64,3)
+model = TSTabFusionTransformer(x_ts.shape[1], c_out, x_ts.shape[-1], classes, cont_names)
+x = (x_ts, (x_cat, x_cont))
+test_eq(model(x).shape, (x_ts.shape[0], c_out))
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.tabmodel.html b/models.tabmodel.html new file mode 100644 index 000000000..7a5b194ff --- /dev/null +++ b/models.tabmodel.html @@ -0,0 +1,1409 @@ + + + + + + + + + +tsai - TabModel + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

TabModel

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on fastai’s TabularModel.

+

We built it so that it’s easy to change the head of the model, something that is particularly interesting when building hybrid models.

+
+

source

+
+

TabHead

+
+
 TabHead (emb_szs, n_cont, c_out, layers=None, fc_dropout=None,
+          y_range=None, use_bn=True, bn_final=False, lin_first=False,
+          act=ReLU(inplace=True), skip=False)
+
+

Basic head for tabular data.

+
+

source

+
+
+

TabBackbone

+
+
 TabBackbone (emb_szs, n_cont, embed_p=0.0, bn_cont=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

TabModel

+
+
 TabModel (emb_szs, n_cont, c_out, layers=None, fc_dropout=None,
+           embed_p=0.0, y_range=None, use_bn=True, bn_final=False,
+           bn_cont=True, lin_first=False, act=ReLU(inplace=True),
+           skip=False)
+
+

Basic model for tabular data.

+
+
from fastai.tabular.core import *
+from tsai.data.tabular import *
+
+
+
path = untar_data(URLs.ADULT_SAMPLE)
+df = pd.read_csv(path/'adult.csv')
+# df['salary'] = np.random.rand(len(df)) # uncomment to simulate a cont dependent variable
+procs = [Categorify, FillMissing, Normalize]
+cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
+cont_names = ['age', 'fnlwgt', 'education-num']
+y_names = ['salary']
+y_block = RegressionBlock() if isinstance(df['salary'].values[0], float) else CategoryBlock()
+splits = RandomSplitter()(range_of(df))
+pd.options.mode.chained_assignment=None
+to = TabularPandas(df, procs=procs, cat_names=cat_names, cont_names=cont_names, y_names=y_names, y_block=y_block, splits=splits, inplace=True, 
+                   reduce_memory=False)
+to.show(5)
+tab_dls = to.dataloaders(bs=16, val_bs=32)
+b = first(tab_dls.train)
+test_eq((b[0].shape, b[1].shape, b[2].shape), (torch.Size([16, 7]), torch.Size([16, 3]), torch.Size([16, 1])))
+
+
 | workclass | education | marital-status | occupation | relationship | race | education-num_na | age | fnlwgt | education-num | salary
20505 | Private | HS-grad | Married-civ-spouse | Sales | Husband | White | False | 47.0 | 197836.0 | 9.0 | <50k
28679 | Private | HS-grad | Married-civ-spouse | Craft-repair | Husband | White | False | 28.0 | 65078.0 | 9.0 | >=50k
11669 | Private | HS-grad | Never-married | Adm-clerical | Not-in-family | White | False | 38.0 | 202683.0 | 9.0 | <50k
29079 | Self-emp-not-inc | Bachelors | Married-civ-spouse | Prof-specialty | Husband | White | False | 41.0 | 168098.0 | 13.0 | <50k
7061 | Private | HS-grad | Married-civ-spouse | Adm-clerical | Husband | White | False | 31.0 | 243442.0 | 9.0 | <50k
+
+
+
+
tab_model = build_tabular_model(TabModel, dls=tab_dls)
+b = first(tab_dls.train)
+test_eq(tab_model.to(b[0].device)(*b[:-1]).shape, (tab_dls.bs, tab_dls.c))
+learn = Learner(tab_dls, tab_model, splitter=ts_splitter)
+p1 = count_parameters(learn.model)
+learn.freeze()
+p2 = count_parameters(learn.model)
+learn.unfreeze()
+p3 = count_parameters(learn.model)
+assert p1 == p3
+assert p1 > p2 > 0
+
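Following the note above about easily customizable heads, here is a minimal sketch of configuring the head directly through the documented arguments (assumptions: fastai’s get_emb_sz can derive the embedding sizes from the `to` TabularPandas object built above, and the `layers` / `fc_dropout` arguments shown in the TabModel signature control its head):

from fastai.tabular.model import get_emb_sz

emb_szs = get_emb_sz(to)   # embedding sizes inferred from the TabularPandas object
model = TabModel(emb_szs, n_cont=len(to.cont_names), c_out=tab_dls.c, layers=[200, 100], fc_dropout=0.1)
x_cat, x_cont, _ = first(tab_dls.train)
test_eq(model.to(x_cat.device)(x_cat, x_cont).shape, (tab_dls.bs, tab_dls.c))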
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.tabtransformer.html b/models.tabtransformer.html new file mode 100644 index 000000000..cedaefbdb --- /dev/null +++ b/models.tabtransformer.html @@ -0,0 +1,1341 @@ + + + + + + + + + +tsai - TabTransformer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

TabTransformer

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial TabTransformer PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co).

+

Huang, X., Khetan, A., Cvitkovic, M., & Karnin, Z. (2020). TabTransformer: Tabular Data Modeling Using Contextual Embeddings. arXiv preprint https://arxiv.org/pdf/2012.06678

+

Official repo: https://github.com/awslabs/autogluon/tree/master/tabular/src/autogluon/tabular/models/tab_transformer

+
+

source

+
+

TabTransformer

+
+
 TabTransformer (classes, cont_names, c_out, column_embed=True,
+                 add_shared_embed=False, shared_embed_div=8,
+                 embed_dropout=0.1, drop_whole_embed=False, d_model=32,
+                 n_layers=6, n_heads=8, d_k=None, d_v=None, d_ff=None,
+                 res_attention=True, attention_act='gelu',
+                 res_dropout=0.1, norm_cont=True, mlp_mults=(4, 2),
+                 mlp_dropout=0.0, mlp_act=None, mlp_skip=False,
+                 mlp_bn=False, bn_final=False)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call .to(), etc.

+

Note: as per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

training (bool): indicates whether this module is in training or evaluation mode.

+
+

source

+
+
+

FullEmbeddingDropout

+
+
 FullEmbeddingDropout (dropout:float)
+
+

From https://github.com/jrzaurin/pytorch-widedeep/blob/be96b57f115e4a10fde9bb82c35380a3ac523f52/pytorch_widedeep/models/tab_transformer.py#L153

+
+

source

+
+
+

SharedEmbedding

+
+
 SharedEmbedding (num_embeddings, embedding_dim, shared_embed=True,
+                  add_shared_embed=False, shared_embed_div=8)
+
+

Base class for all neural network modules.

+

Your models should also subclass this class.

+

Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::

+
import torch.nn as nn
+import torch.nn.functional as F
+
+class Model(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5)
+        self.conv2 = nn.Conv2d(20, 20, 5)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        return F.relu(self.conv2(x))
+

Submodules assigned in this way will be registered, and will have their parameters converted too when you call .to(), etc.

+

Note: as per the example above, an __init__() call to the parent class must be made before assignment on the child.

+

training (bool): indicates whether this module is in training or evaluation mode.

+
+

source

+
+
+

ifnone

+
+
 ifnone (a, b)
+
+

b if a is None else a

+
+
from fastai.tabular.all import *
+
+
+
path = untar_data(URLs.ADULT_SAMPLE)
+df = pd.read_csv(path/'adult.csv')
+dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names="salary",
+    cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'],
+    cont_names = ['age', 'fnlwgt', 'education-num'],
+    procs = [Categorify, FillMissing, Normalize])
+x_cat, x_cont, yb = first(dls.train)
+model = TabTransformer(dls.classes, dls.cont_names, dls.c)
+test_eq(model(x_cat, x_cont).shape, (dls.train.bs, dls.c))
+
+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/models.tcn.html b/models.tcn.html new file mode 100644 index 000000000..1a3e97718 --- /dev/null +++ b/models.tcn.html @@ -0,0 +1,1471 @@ + + + + + + + + + +tsai - TCN + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

TCN

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial PyTorch implementation by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:

+ +
+

source

+
+

TCN

+
+
 TCN (c_in, c_out, layers=[25, 25, 25, 25, 25, 25, 25, 25], ks=7,
+      conv_dropout=0.0, fc_dropout=0.0)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

TemporalConvNet

+
+
 TemporalConvNet (c_in, layers, ks=2, dropout=0.0)
+
+
+

source

+
+
+

TemporalBlock

+
+
 TemporalBlock (ni, nf, ks, stride, dilation, padding, dropout=0.0)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 16
+nvars = 3
+seq_len = 128
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+model = TCN(nvars, c_out, fc_dropout=.5)
+test_eq(model(xb).shape, (bs, c_out))
+model = TCN(nvars, c_out, conv_dropout=.2)
+test_eq(model(xb).shape, (bs, c_out))
+model = TCN(nvars, c_out)
+test_eq(model(xb).shape, (bs, c_out))
+model
+
+
TCN(
+  (tcn): Sequential(
+    (0): TemporalBlock(
+      (conv1): Conv1d(3, 25, kernel_size=(7,), stride=(1,), padding=(6,))
+      (chomp1): Chomp1d()
+      (relu1): ReLU()
+      (dropout1): Dropout(p=0.0, inplace=False)
+      (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(6,))
+      (chomp2): Chomp1d()
+      (relu2): ReLU()
+      (dropout2): Dropout(p=0.0, inplace=False)
+      (net): Sequential(
+        (0): Conv1d(3, 25, kernel_size=(7,), stride=(1,), padding=(6,))
+        (1): Chomp1d()
+        (2): ReLU()
+        (3): Dropout(p=0.0, inplace=False)
+        (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(6,))
+        (5): Chomp1d()
+        (6): ReLU()
+        (7): Dropout(p=0.0, inplace=False)
+      )
+      (downsample): Conv1d(3, 25, kernel_size=(1,), stride=(1,))
+      (relu): ReLU()
+    )
+    (1): TemporalBlock(
+      (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(12,), dilation=(2,))
+      (chomp1): Chomp1d()
+      (relu1): ReLU()
+      (dropout1): Dropout(p=0.0, inplace=False)
+      (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(12,), dilation=(2,))
+      (chomp2): Chomp1d()
+      (relu2): ReLU()
+      (dropout2): Dropout(p=0.0, inplace=False)
+      (net): Sequential(
+        (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(12,), dilation=(2,))
+        (1): Chomp1d()
+        (2): ReLU()
+        (3): Dropout(p=0.0, inplace=False)
+        (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(12,), dilation=(2,))
+        (5): Chomp1d()
+        (6): ReLU()
+        (7): Dropout(p=0.0, inplace=False)
+      )
+      (relu): ReLU()
+    )
+    (2): TemporalBlock(
+      (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(24,), dilation=(4,))
+      (chomp1): Chomp1d()
+      (relu1): ReLU()
+      (dropout1): Dropout(p=0.0, inplace=False)
+      (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(24,), dilation=(4,))
+      (chomp2): Chomp1d()
+      (relu2): ReLU()
+      (dropout2): Dropout(p=0.0, inplace=False)
+      (net): Sequential(
+        (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(24,), dilation=(4,))
+        (1): Chomp1d()
+        (2): ReLU()
+        (3): Dropout(p=0.0, inplace=False)
+        (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(24,), dilation=(4,))
+        (5): Chomp1d()
+        (6): ReLU()
+        (7): Dropout(p=0.0, inplace=False)
+      )
+      (relu): ReLU()
+    )
+    (3): TemporalBlock(
+      (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(48,), dilation=(8,))
+      (chomp1): Chomp1d()
+      (relu1): ReLU()
+      (dropout1): Dropout(p=0.0, inplace=False)
+      (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(48,), dilation=(8,))
+      (chomp2): Chomp1d()
+      (relu2): ReLU()
+      (dropout2): Dropout(p=0.0, inplace=False)
+      (net): Sequential(
+        (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(48,), dilation=(8,))
+        (1): Chomp1d()
+        (2): ReLU()
+        (3): Dropout(p=0.0, inplace=False)
+        (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(48,), dilation=(8,))
+        (5): Chomp1d()
+        (6): ReLU()
+        (7): Dropout(p=0.0, inplace=False)
+      )
+      (relu): ReLU()
+    )
+    (4): TemporalBlock(
+      (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(96,), dilation=(16,))
+      (chomp1): Chomp1d()
+      (relu1): ReLU()
+      (dropout1): Dropout(p=0.0, inplace=False)
+      (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(96,), dilation=(16,))
+      (chomp2): Chomp1d()
+      (relu2): ReLU()
+      (dropout2): Dropout(p=0.0, inplace=False)
+      (net): Sequential(
+        (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(96,), dilation=(16,))
+        (1): Chomp1d()
+        (2): ReLU()
+        (3): Dropout(p=0.0, inplace=False)
+        (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(96,), dilation=(16,))
+        (5): Chomp1d()
+        (6): ReLU()
+        (7): Dropout(p=0.0, inplace=False)
+      )
+      (relu): ReLU()
+    )
+    (5): TemporalBlock(
+      (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(192,), dilation=(32,))
+      (chomp1): Chomp1d()
+      (relu1): ReLU()
+      (dropout1): Dropout(p=0.0, inplace=False)
+      (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(192,), dilation=(32,))
+      (chomp2): Chomp1d()
+      (relu2): ReLU()
+      (dropout2): Dropout(p=0.0, inplace=False)
+      (net): Sequential(
+        (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(192,), dilation=(32,))
+        (1): Chomp1d()
+        (2): ReLU()
+        (3): Dropout(p=0.0, inplace=False)
+        (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(192,), dilation=(32,))
+        (5): Chomp1d()
+        (6): ReLU()
+        (7): Dropout(p=0.0, inplace=False)
+      )
+      (relu): ReLU()
+    )
+    (6): TemporalBlock(
+      (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(384,), dilation=(64,))
+      (chomp1): Chomp1d()
+      (relu1): ReLU()
+      (dropout1): Dropout(p=0.0, inplace=False)
+      (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(384,), dilation=(64,))
+      (chomp2): Chomp1d()
+      (relu2): ReLU()
+      (dropout2): Dropout(p=0.0, inplace=False)
+      (net): Sequential(
+        (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(384,), dilation=(64,))
+        (1): Chomp1d()
+        (2): ReLU()
+        (3): Dropout(p=0.0, inplace=False)
+        (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(384,), dilation=(64,))
+        (5): Chomp1d()
+        (6): ReLU()
+        (7): Dropout(p=0.0, inplace=False)
+      )
+      (relu): ReLU()
+    )
+    (7): TemporalBlock(
+      (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(768,), dilation=(128,))
+      (chomp1): Chomp1d()
+      (relu1): ReLU()
+      (dropout1): Dropout(p=0.0, inplace=False)
+      (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(768,), dilation=(128,))
+      (chomp2): Chomp1d()
+      (relu2): ReLU()
+      (dropout2): Dropout(p=0.0, inplace=False)
+      (net): Sequential(
+        (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(768,), dilation=(128,))
+        (1): Chomp1d()
+        (2): ReLU()
+        (3): Dropout(p=0.0, inplace=False)
+        (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(768,), dilation=(128,))
+        (5): Chomp1d()
+        (6): ReLU()
+        (7): Dropout(p=0.0, inplace=False)
+      )
+      (relu): ReLU()
+    )
+  )
+  (gap): GAP1d(
+    (gap): AdaptiveAvgPool1d(output_size=1)
+    (flatten): Flatten(full=False)
+  )
+  (linear): Linear(in_features=25, out_features=2, bias=True)
+)
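As the printout shows, the dilation doubles at every TemporalBlock (1, 2, 4, ..., 128) and the causal padding is (ks-1)*dilation, so the receptive field grows exponentially with depth. A rough estimate, assuming two dilated causal convolutions per block as the printout suggests (this helper is illustrative, not part of tsai):

def tcn_receptive_field(n_blocks=8, ks=7):
    # dilation doubles per block: 1, 2, 4, ..., 2**(n_blocks-1)
    dilations = [2 ** i for i in range(n_blocks)]
    # each TemporalBlock applies two causal convolutions with the same dilation
    return 1 + sum(2 * (ks - 1) * d for d in dilations)

print(tcn_receptive_field())  # 3061 time steps with the default 8 layers and ks=7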
+
+
\ No newline at end of file
diff --git a/models.transformermodel.html b/models.transformermodel.html new file mode 100644 index 000000000..ea8bd8b6f --- /dev/null +++ b/models.transformermodel.html @@ -0,0 +1,1323 @@
+tsai - TransformerModel
+
+
+

TransformerModel

+
+

This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co

+
+
+

source

+
+

TransformerModel

+
+
 TransformerModel (c_in, c_out, d_model=64, n_head=1, d_ffn=128,
+                   dropout=0.1, activation='relu', n_layers=1)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 16
+nvars = 3
+seq_len = 96
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+
+
+model = TransformerModel(nvars, c_out, d_model=64, n_head=1, d_ffn=128, dropout=0.1, activation='gelu', n_layers=3)
+test_eq(model(xb).shape, [bs, c_out])
+print(count_parameters(model))
+model
+
+
100930
+
+
+
TransformerModel(
+  (permute): Permute(dims=2, 0, 1)
+  (inlinear): Linear(in_features=3, out_features=64, bias=True)
+  (relu): ReLU()
+  (transformer_encoder): TransformerEncoder(
+    (layers): ModuleList(
+      (0): TransformerEncoderLayer(
+        (self_attn): MultiheadAttention(
+          (out_proj): NonDynamicallyQuantizableLinear(in_features=64, out_features=64, bias=True)
+        )
+        (linear1): Linear(in_features=64, out_features=128, bias=True)
+        (dropout): Dropout(p=0.1, inplace=False)
+        (linear2): Linear(in_features=128, out_features=64, bias=True)
+        (norm1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
+        (norm2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
+        (dropout1): Dropout(p=0.1, inplace=False)
+        (dropout2): Dropout(p=0.1, inplace=False)
+      )
+      (1): TransformerEncoderLayer(
+        (self_attn): MultiheadAttention(
+          (out_proj): NonDynamicallyQuantizableLinear(in_features=64, out_features=64, bias=True)
+        )
+        (linear1): Linear(in_features=64, out_features=128, bias=True)
+        (dropout): Dropout(p=0.1, inplace=False)
+        (linear2): Linear(in_features=128, out_features=64, bias=True)
+        (norm1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
+        (norm2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
+        (dropout1): Dropout(p=0.1, inplace=False)
+        (dropout2): Dropout(p=0.1, inplace=False)
+      )
+      (2): TransformerEncoderLayer(
+        (self_attn): MultiheadAttention(
+          (out_proj): NonDynamicallyQuantizableLinear(in_features=64, out_features=64, bias=True)
+        )
+        (linear1): Linear(in_features=64, out_features=128, bias=True)
+        (dropout): Dropout(p=0.1, inplace=False)
+        (linear2): Linear(in_features=128, out_features=64, bias=True)
+        (norm1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
+        (norm2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
+        (dropout1): Dropout(p=0.1, inplace=False)
+        (dropout2): Dropout(p=0.1, inplace=False)
+      )
+    )
+    (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
+  )
+  (transpose): Transpose(1, 0)
+  (max): Max(dim=1, keepdim=False)
+  (outlinear): Linear(in_features=64, out_features=2, bias=True)
+)
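A hedged sketch of the shape flow implied by the printout above (plain PyTorch, for intuition only): the input (bs, c_in, seq_len) is permuted to (seq_len, bs, c_in), projected to d_model, passed through the encoder (shape preserved), transposed back, and max-pooled over time before the output linear layer.

import torch
import torch.nn as nn

bs, c_in, seq_len, d_model = 16, 3, 96, 64
x = torch.rand(bs, c_in, seq_len)
x = x.permute(2, 0, 1)                 # (seq_len, bs, c_in)      -> "permute"
x = nn.Linear(c_in, d_model)(x)        # (seq_len, bs, d_model)   -> "inlinear"
x = torch.relu(x)
# ... the TransformerEncoder layers keep this shape ...
x = x.transpose(1, 0)                  # (bs, seq_len, d_model)   -> "transpose"
x = x.max(dim=1).values                # (bs, d_model)            -> temporal max pooling
print(nn.Linear(d_model, 2)(x).shape)  # torch.Size([16, 2])      -> "outlinear"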
+
+
\ No newline at end of file
diff --git a/models.transformerrnnplus.html b/models.transformerrnnplus.html new file mode 100644 index 000000000..e4cab1309 --- /dev/null +++ b/models.transformerrnnplus.html @@ -0,0 +1,1784 @@
+tsai - TransformerRNNPlus
+
+
+

TransformerRNNPlus

+

This is a PyTorch implementation of a Transformer + RNN created by Ignacio Oguiza (oguiza@timeseriesAI.co), inspired by the code created by Baurzhan Urazalinov (https://www.kaggle.com/baurzhanurazalinov).

+

Baurzhan Urazalinov won a Kaggle competition (Parkinson’s Freezing of Gait Prediction: Event detection from wearable sensor data - 2023) using the following original tensorflow code:

+ +

I’d like to congratulate Baurzhan for winning this competition, and for sharing the code he used.

+
+
from tsai.models.utils import count_parameters
+
+
+
t = torch.rand(4, 864, 54)
+encoder_layer = torch.nn.TransformerEncoderLayer(54, 6, dim_feedforward=2048, dropout=0.1, 
+                                                 activation="relu", layer_norm_eps=1e-05, 
+                                                 batch_first=True, norm_first=False)
+print(encoder_layer(t).shape)
+print(count_parameters(encoder_layer))
+
+
torch.Size([4, 864, 54])
+235382
+
+
+
+
bs = 4
+c_in = 5
+seq_len = 50
+
+encoder = _TransformerRNNEncoder(nn.LSTM, c_in=c_in, seq_len=seq_len, d_model=128, nhead=4, num_encoder_layers=1, dim_feedforward=None, proj_dropout=0.1, dropout=0.1, num_rnn_layers=3, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+print(encoder(t).shape)
+
+
torch.Size([4, 1024, 50])
+
+
+
+

source

+
+

TransformerGRUPlus

+
+
 TransformerGRUPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,
+                     d_model:int=128, nhead:int=16,
+                     proj_dropout:float=0.1, num_encoder_layers:int=1,
+                     dim_feedforward:int=2048, dropout:float=0.1,
+                     num_rnn_layers:int=1, bidirectional:bool=True,
+                     custom_head=None, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
Argument             Type      Default   Details
c_in                 int                 Number of channels in the input tensor.
c_out                int                 Number of output channels.
seq_len              int                 Number of time steps in the input tensor.
d                    tuple     None      int or tuple with shape of the output tensor
d_model              int       128       Total dimension of the model.
nhead                int       16        Number of parallel attention heads (d_model will be split across nhead - each head will have dimension d_model // nhead).
proj_dropout         float     0.1       Dropout probability after the first linear layer. Default: 0.1.
num_encoder_layers   int       1         Number of transformer encoder layers. Default: 1.
dim_feedforward      int       2048      The dimension of the feedforward network model. Default: 2048.
dropout              float     0.1       Transformer encoder layers dropout. Default: 0.1.
num_rnn_layers       int       1         Number of RNN layers in the encoder. Default: 1.
bidirectional        bool      True      If True, becomes a bidirectional RNN. Default: True.
custom_head          NoneType  None      Custom head that will be applied to the model. If None, a head with c_out outputs will be used. Default: None.
kwargs
+
+

source

+
+
+

TransformerLSTMPlus

+
+
 TransformerLSTMPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,
+                      d_model:int=128, nhead:int=16,
+                      proj_dropout:float=0.1, num_encoder_layers:int=1,
+                      dim_feedforward:int=2048, dropout:float=0.1,
+                      num_rnn_layers:int=1, bidirectional:bool=True,
+                      custom_head=None, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
Argument             Type      Default   Details
c_in                 int                 Number of channels in the input tensor.
c_out                int                 Number of output channels.
seq_len              int                 Number of time steps in the input tensor.
d                    tuple     None      int or tuple with shape of the output tensor
d_model              int       128       Total dimension of the model.
nhead                int       16        Number of parallel attention heads (d_model will be split across nhead - each head will have dimension d_model // nhead).
proj_dropout         float     0.1       Dropout probability after the first linear layer. Default: 0.1.
num_encoder_layers   int       1         Number of transformer encoder layers. Default: 1.
dim_feedforward      int       2048      The dimension of the feedforward network model. Default: 2048.
dropout              float     0.1       Transformer encoder layers dropout. Default: 0.1.
num_rnn_layers       int       1         Number of RNN layers in the encoder. Default: 1.
bidirectional        bool      True      If True, becomes a bidirectional RNN. Default: True.
custom_head          NoneType  None      Custom head that will be applied to the model. If None, a head with c_out outputs will be used. Default: None.
kwargs
+
+

source

+
+
+

TransformerRNNPlus

+
+
 TransformerRNNPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,
+                     d_model:int=128, nhead:int=16,
+                     proj_dropout:float=0.1, num_encoder_layers:int=1,
+                     dim_feedforward:int=2048, dropout:float=0.1,
+                     num_rnn_layers:int=1, bidirectional:bool=True,
+                     custom_head=None, **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
Argument             Type      Default   Details
c_in                 int                 Number of channels in the input tensor.
c_out                int                 Number of output channels.
seq_len              int                 Number of time steps in the input tensor.
d                    tuple     None      int or tuple with shape of the output tensor
d_model              int       128       Total dimension of the model.
nhead                int       16        Number of parallel attention heads (d_model will be split across nhead - each head will have dimension d_model // nhead).
proj_dropout         float     0.1       Dropout probability after the first linear layer. Default: 0.1.
num_encoder_layers   int       1         Number of transformer encoder layers. Default: 1.
dim_feedforward      int       2048      The dimension of the feedforward network model. Default: 2048.
dropout              float     0.1       Transformer encoder layers dropout. Default: 0.1.
num_rnn_layers       int       1         Number of RNN layers in the encoder. Default: 1.
bidirectional        bool      True      If True, becomes a bidirectional RNN. Default: True.
custom_head          NoneType  None      Custom head that will be applied to the model. If None, a head with c_out outputs will be used. Default: None.
kwargs
+
+
bs = 4
+c_in = 5
+c_out = 1
+seq_len = 50
+d = None
+
+model = TransformerRNNPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == torch.Size([4]) 
+print(model(t).shape)
+
+model = TransformerLSTMPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == torch.Size([4])
+print(model(t).shape)
+
+model = TransformerGRUPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == torch.Size([4])
+print(model(t).shape)
+
+
torch.Size([4])
+torch.Size([4])
+torch.Size([4])
+
+
+
+
bs = 4
+c_in = 5
+c_out = 3
+seq_len = 50
+d = None
+
+model = TransformerRNNPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == (bs, c_out)
+print(model(t).shape)
+
+model = TransformerLSTMPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == (bs, c_out)
+print(model(t).shape)
+
+model = TransformerGRUPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == (bs, c_out)
+print(model(t).shape)
+
+
torch.Size([4, 3])
+torch.Size([4, 3])
+torch.Size([4, 3])
+
+
+
+
bs = 4
+c_in = 5
+c_out = 3
+seq_len = 50
+d = 50
+
+model = TransformerRNNPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == (bs, d, c_out)
+print(model(t).shape)
+
+model = TransformerLSTMPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == (bs, d, c_out)
+print(model(t).shape)
+
+model = TransformerGRUPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)
+t = torch.randn(bs, c_in, seq_len)
+assert model(t).shape == (bs, d, c_out)
+print(model(t).shape)
+
+
torch.Size([4, 50, 3])
+torch.Size([4, 50, 3])
+torch.Size([4, 50, 3])
+
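To recap the three cases above in a single, illustrative check (using the same default settings): with d=None a single prediction per sample is returned (squeezed to a 1-d tensor when c_out=1), while an integer d adds an extra output dimension of that size.

for d, c_out, expected in [(None, 1, (4,)), (None, 3, (4, 3)), (50, 3, (4, 50, 3))]:
    model = TransformerRNNPlus(c_in=5, c_out=c_out, seq_len=50, d=d)
    assert model(torch.randn(4, 5, 50)).shape == expected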
+
\ No newline at end of file
diff --git a/models.tsitplus.html b/models.tsitplus.html new file mode 100644 index 000000000..3dd9596fa --- /dev/null +++ b/models.tsitplus.html @@ -0,0 +1,1434 @@
+tsai - TSiT
+
+
+

TSiT

+

This is a PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on ViT (Vision Transformer):

+

Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., … & Houlsby, N. (2020).

+

An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929.

+
+

source

+
+

TSiTPlus

+
+
 TSiTPlus (c_in:int, c_out:int, seq_len:int, d_model:int=128, depth:int=6,
+           n_heads:int=16, act:str='gelu', lsa:bool=False,
+           attn_dropout:float=0.0, dropout:float=0.0,
+           drop_path_rate:float=0.0, mlp_ratio:int=1, qkv_bias:bool=True,
+           pre_norm:bool=False, use_token:bool=False, use_pe:bool=True,
+           cat_pos:Optional[list]=None, n_cat_embeds:Optional[list]=None,
+           cat_embed_dims:Optional[list]=None,
+           cat_padding_idxs:Optional[list]=None, token_size:int=None,
+           tokenizer:Optional[Callable]=None,
+           feature_extractor:Optional[Callable]=None, flatten:bool=False,
+           concat_pool:bool=True, fc_dropout:float=0.0, use_bn:bool=False,
+           bias_init:Union[float,list,NoneType]=None,
+           y_range:Optional[tuple]=None,
+           custom_head:Optional[Callable]=None, verbose:bool=True,
+           **kwargs)
+
+

Time series transformer model based on ViT (Vision Transformer):

+

Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., … & Houlsby, N. (2020). An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929.

+

This implementation is a modified version of Vision Transformer that is part of the great timm library (https://github.com/rwightman/pytorch-image-models/blob/72b227dcf57c0c62291673b96bdc06576bb90457/timm/models/vision_transformer.py)

+

Args:
• c_in: the number of features (aka variables, dimensions, channels) in the time series dataset.
• c_out: the number of target classes.
• seq_len: number of time steps in the time series.
• d_model: total dimension of the model (number of features created by the model).
• depth: number of blocks in the encoder.
• n_heads: parallel attention heads. Default: 16 (usual range: 8-16).
• act: the activation function of the positionwise feedforward layer.
• lsa: whether locality self-attention is used (see Lee, S. H., Lee, S., & Song, B. C. (2021). Vision Transformer for Small-Size Datasets. arXiv preprint arXiv:2112.13492).
• attn_dropout: dropout rate applied to the attention sublayer.
• dropout: dropout applied to the embedded sequence steps after position embeddings have been added, and to the mlp sublayer in the encoder.
• drop_path_rate: stochastic depth rate.
• mlp_ratio: ratio of mlp hidden dim to embedding dim.
• qkv_bias: determines whether bias is applied to the Linear projections of queries, keys and values in the MultiheadAttention.
• pre_norm: if True, normalization will be applied as the first step in the sublayers. Defaults to False.
• use_token: if True, the output will come from the transformed token. This is meant to be used in classification tasks.
• use_pe: flag to indicate if positional embedding is used.
• n_cat_embeds: list with the sizes of the dictionaries of embeddings (int).
• cat_embed_dims: list with the sizes of each embedding vector (int).
• cat_padding_idxs: if specified, the entries at cat_padding_idxs do not contribute to the gradient; therefore, the embedding vectors at cat_padding_idxs are not updated during training. Use 0 for those categorical embeddings that may have #na# values. Otherwise, leave them as None. You can enter a combination for different embeddings (for example, [0, None, None]).
• cat_pos: list with the position of the categorical variables in the input.
• token_size: size of the embedding function used to reduce the sequence length (similar to ViT’s patch size).
• tokenizer: nn.Module or callable that will be used to reduce the sequence length.
• feature_extractor: nn.Module or callable that will be used to preprocess the time series before the embedding step. It is useful to extract features or resample the time series.
• flatten: flag to indicate if the 3d logits will be flattened to 2d in the model’s head if use_token is set to False. If use_token is False and flatten is False, the model will apply a pooling layer.
• concat_pool: if True, the head begins with fastai’s AdaptiveConcatPool2d; otherwise, it uses traditional average pooling.
• fc_dropout: dropout applied to the final fully connected layer.
• use_bn: flag that indicates if batchnorm will be applied to the head.
• bias_init: values used to initialize the output layer.
• y_range: range of possible y values (used in regression tasks).
• custom_head: custom head that will be applied to the network. It must contain all kwargs (pass a partial function).
• verbose: flag to control verbosity of the model.

+

Input: x: bs (batch size) x nvars (aka features, variables, dimensions, channels) x seq_len (aka time steps)

+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+model = TSiTPlus(nvars, c_out, seq_len, attn_dropout=.1, dropout=.1, use_token=True)
+test_eq(model(xb).shape, (bs, c_out))
+model = TSiTPlus(nvars, c_out, seq_len, attn_dropout=.1, dropout=.1, use_token=False)
+test_eq(model(xb).shape, (bs, c_out))
+
+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+bias_init = np.array([0.8, .2])
+model = TSiTPlus(nvars, c_out, seq_len, bias_init=bias_init)
+test_eq(model(xb).shape, (bs, c_out))
+test_eq(model.head[1].bias.data, tensor(bias_init))
+
+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 1
+xb = torch.rand(bs, nvars, seq_len)
+bias_init = 8.5
+model = TSiTPlus(nvars, c_out, seq_len, bias_init=bias_init)
+test_eq(model(xb).shape, (bs, c_out))
+test_eq(model.head[1].bias.data, tensor([bias_init]))
+
+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+bias_init = np.array([0.8, .2])
+model = TSiTPlus(nvars, c_out, seq_len, bias_init=bias_init, lsa=True)
+test_eq(model(xb).shape, (bs, c_out))
+test_eq(model.head[1].bias.data, tensor(bias_init))
+
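One possible use of bias_init (an illustration, not taken from the tsai docs) is to start the output layer at the empirical class frequencies of the training labels instead of hard-coded values like the [0.8, .2] above. The y_train below is a hypothetical stand-in for real training labels:

y_train = np.random.randint(0, 2, 1000)            # hypothetical training labels
bias_init = np.bincount(y_train) / len(y_train)    # empirical class frequencies, e.g. [0.49, 0.51]
model = TSiTPlus(nvars, c_out, seq_len, bias_init=bias_init)
test_eq(model(xb).shape, (bs, c_out))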
+
+
+

Feature extractor

+

Transformers cannot be applied directly to very long sequences without running into memory and compute limits. To avoid this, we have included a way to subsample the sequence and generate a more manageable input.

+
+
from tsai.data.validation import get_splits
+from tsai.data.core import get_ts_dls
+
+
+
X = np.zeros((10, 3, 5000)) 
+y = np.random.randint(0,2,X.shape[0])
+splits = get_splits(y)
+dls = get_ts_dls(X, y, splits=splits)
+xb, yb = dls.train.one_batch()
+xb
+
+
+
+

+
+
+
+
+
TSTensor(samples:8, vars:3, len:5000, device=cpu, dtype=torch.float32)
+
+
+

If you try to use TSiTPlus, it’s likely you’ll get an ‘out-of-memory’ error.

+

To avoid this you can subsample the sequence reducing the input’s length. This can be done in multiple ways. Here are a few examples:

+
+
# Separable convolution (to avoid mixing channels)
+feature_extractor = Conv1d(xb.shape[1], xb.shape[1], ks=100, stride=50, padding=0, groups=xb.shape[1]).to(default_device())
+feature_extractor.to(xb.device)(xb).shape
+
+
torch.Size([8, 3, 99])
+
+
+
+
# Convolution (if you want to mix channels or change number of channels)
+feature_extractor=MultiConv1d(xb.shape[1], 64, kss=[1,3,5,7,9], keep_original=True).to(default_device())
+test_eq(feature_extractor.to(xb.device)(xb).shape, (xb.shape[0], 64, xb.shape[-1]))
+
+
+
# MaxPool
+feature_extractor = nn.Sequential(Pad1d((0, 50), 0), nn.MaxPool1d(kernel_size=100, stride=50)).to(default_device())
+feature_extractor.to(xb.device)(xb).shape
+
+
torch.Size([8, 3, 100])
+
+
+
+
# AvgPool
+feature_extractor = nn.Sequential(Pad1d((0, 50), 0), nn.AvgPool1d(kernel_size=100, stride=50)).to(default_device())
+feature_extractor.to(xb.device)(xb).shape
+
+
torch.Size([8, 3, 100])
+
+
+

Once you decide what type of transform you want to apply, you just need to pass the layer as the feature_extractor attribute:

+
+
bs = 16
+nvars = 4
+seq_len = 1000
+c_out = 2
+d_model = 128
+
+xb = torch.rand(bs, nvars, seq_len)
+feature_extractor = partial(Conv1d, ks=5, stride=3, padding=0, groups=xb.shape[1])
+model = TSiTPlus(nvars, c_out, seq_len, d_model=d_model, feature_extractor=feature_extractor)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+
+
+
+

Categorical variables

+
+
from tsai.utils import alphabet, ALPHABET
+
+
+
+a = alphabet[np.random.randint(0,3,40)]   # 40 random lowercase categories ('a'-'c')
+b = ALPHABET[np.random.randint(6,10,40)]  # 40 random uppercase categories ('G'-'J')
+c = np.random.rand(40).reshape(4,1,10)    # continuous variable: 4 samples x 1 var x 10 steps
+map_a = {k:v for v,k in enumerate(np.unique(a))}  # category -> integer code
+map_b = {k:v for v,k in enumerate(np.unique(b))}
+n_cat_embeds = [len(m.keys()) for m in [map_a, map_b]]  # dictionary size for each embedding
+szs = [emb_sz_rule(n) for n in n_cat_embeds]             # fastai's rule of thumb for embedding dims
+a = np.asarray(a.map(map_a)).reshape(4,1,10)  # encode and reshape to 4 samples x 1 var x 10 steps
+b = np.asarray(b.map(map_b)).reshape(4,1,10)
+inp = torch.from_numpy(np.concatenate((c,a,b), 1)).float()  # channel 0 continuous, channels 1-2 categorical
+feature_extractor = partial(Conv1d, ks=3, padding='same')
+model = TSiTPlus(3, 2, 10, d_model=64, cat_pos=[1,2], feature_extractor=feature_extractor)  # cat_pos marks the categorical channels
+test_eq(model(inp).shape, (4,2))
+
+
[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.
+
+
+
+
+

Sequence Embedding

+

Sometimes your samples have a very long sequence length. In those cases you may want to reduce their length before passing them to the transformer. To do that you can just pass a token_size, as in this example (a token_size of 60 turns the 10,080-step series below into 10080 / 60 = 168 embedded steps):

+
+
t = torch.rand(8, 2, 10080)
+SeqTokenizer(2, 128, 60)(t).shape
+
+
torch.Size([8, 128, 168])
+
+
+
+
t = torch.rand(8, 2, 10080)
+model = TSiTPlus(2, 5, 10080, d_model=64, token_size=60)
+model(t).shape
+
+
torch.Size([8, 5])
+
+
\ No newline at end of file
diff --git a/models.tsperceiver.html b/models.tsperceiver.html new file mode 100644 index 000000000..e03533dff --- /dev/null +++ b/models.tsperceiver.html @@ -0,0 +1,1301 @@
+tsai - TSPerceiver
+
+
+

TSPerceiver

+

This implementation is inspired by:

+

Jaegle, A., Gimeno, F., Brock, A., Zisserman, A., Vinyals, O., & Carreira, J. (2021).

+

Perceiver: General Perception with Iterative Attention. arXiv preprint arXiv:2103.03206.

+

Paper: https://arxiv.org/pdf/2103.03206.pdf

+

Official repo: not available as of April 2021.

+
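The key idea in the cited paper, sketched below with plain PyTorch purely for intuition (this is not the actual TSPerceiver code), is that a small, learned set of latent vectors cross-attends to the long input, so attention cost scales with the number of latents rather than with the sequence length; the latents are then refined with self-attention layers.

import torch
import torch.nn as nn

bs, seq_len, d = 4, 5000, 128   # long time-series input, already embedded to d dims
n_latents = 64                  # much smaller than seq_len

x = torch.rand(bs, seq_len, d)
latents = torch.randn(1, n_latents, d)   # learned latent array (an nn.Parameter in practice)
cross_attn = nn.MultiheadAttention(d, num_heads=1, batch_first=True)

q = latents.expand(bs, -1, -1)      # queries come from the latents
out, _ = cross_attn(q, x, x)        # keys/values come from the (long) input
print(out.shape)                    # torch.Size([4, 64, 128])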
+

source

+
+

TSPerceiver

+
+
 TSPerceiver (c_in, c_out, seq_len, cat_szs=0, n_cont=0, n_latents=512,
+              d_latent=128, d_context=None, n_layers=6,
+              self_per_cross_attn=1, share_weights=True, cross_n_heads=1,
+              self_n_heads=8, d_head=None, attn_dropout=0.0,
+              fc_dropout=0.0, concat_pool=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
from tsai.basics import *
+from tsai.data.all import *
+
+
+
dsid = 'OliveOil'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+ts_features_df = get_ts_features(X, y)
+ts_features_df.shape
+
+
Feature Extraction: 100%|██████████████████████████████████████████| 30/30 [00:00<00:00, 189.16it/s]
+
+
+
(60, 11)
+
+
+
+
# raw ts
+tfms  = [None, [Categorize()]]
+batch_tfms = TSStandardize(by_sample=True)
+ts_dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
+
+# ts features
+cat_names = None
+cont_names = ts_features_df.columns[:-2]
+y_names = 'target'
+tab_dls = get_tabular_dls(ts_features_df, cat_names=cat_names, cont_names=cont_names, y_names=y_names, splits=splits)
+
+# mixed
+mixed_dls = get_mixed_dls(ts_dls, tab_dls)
+xb, yb = mixed_dls.one_batch()
+
+
+
model = TSPerceiver(ts_dls.vars, ts_dls.c, ts_dls.len, cat_szs=0, 
+                    # n_cont=0, 
+                    n_cont=xb[1][1].shape[1], 
+                    n_latents=128, d_latent=128, n_layers=3, self_per_cross_attn=1, share_weights=True,
+                    cross_n_heads=16, self_n_heads=16, d_head=None, attn_dropout=0., fc_dropout=0.).to(device)
+test_eq(model(xb).shape, (yb.shape[0], len(np.unique(y))))
+
\ No newline at end of file
diff --git a/models.tssequencerplus.html b/models.tssequencerplus.html new file mode 100644 index 000000000..8fcb98380 --- /dev/null +++ b/models.tssequencerplus.html @@ -0,0 +1,1544 @@
+tsai - TSSequencerPlus
+
+
+

TSSequencerPlus

+
+

This is a PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on Sequencer: Deep LSTM for Image Classification

+
+
+
class TSSequencerPlus(nn.Sequential):
+    r"""Time Series Sequencer model based on:
+
+    Tatsunami, Y., & Taki, M. (2022). Sequencer: Deep LSTM for Image Classification. arXiv preprint arXiv:2205.01972.
+    Official implementation: https://github.com/okojoalg/sequencer
+
+    Args:
+        c_in:               the number of features (aka variables, dimensions, channels) in the time series dataset.
+        c_out:              the number of target classes.
+        seq_len:            number of time steps in the time series.
+        d_model:            total dimension of the model (number of features created by the model).
+        depth:              number of blocks in the encoder.
+        act:                the activation function of positionwise feedforward layer.
+        lstm_dropout:       dropout rate applied to the lstm sublayer.
+        dropout:            dropout applied to the embedded sequence steps after position embeddings have been added and 
+                            to the mlp sublayer in the encoder.
+        drop_path_rate:     stochastic depth rate.
+        mlp_ratio:          ratio of mlp hidden dim to embedding dim.
+        lstm_bias:          determines whether bias is applied to the LSTM layer.
+        pre_norm:           if True normalization will be applied as the first step in the sublayers. Defaults to False.
+        use_token:          if True, the output will come from the transformed token. This is meant to be used in classification tasks.
+        use_pe:             flag to indicate if positional embedding is used.
+        n_cat_embeds:       list with the sizes of the dictionaries of embeddings (int).
+        cat_embed_dims:     list with the sizes of each embedding vector (int).
+        cat_padding_idxs:       If specified, the entries at cat_padding_idxs do not contribute to the gradient; therefore, the embedding vector at cat_padding_idxs
+                            are not updated during training. Use 0 for those categorical embeddings that may have #na# values. Otherwise, leave them as None.
+                            You can enter a combination for different embeddings (for example, [0, None, None]).
+        cat_pos:            list with the position of the categorical variables in the input.
+        token_size:         Size of the embedding function used to reduce the sequence length (similar to ViT's patch size)
+        tokenizer:          nn.Module or callable that will be used to reduce the sequence length
+        feature_extractor:  nn.Module or callable that will be used to preprocess the time series before 
+                            the embedding step. It is useful to extract features or resample the time series.
+        flatten:            flag to indicate if the 3d logits will be flattened to 2d in the model's head if use_token is set to False. 
+                            If use_token is False and flatten is False, the model will apply a pooling layer.
+        concat_pool:        if True the head begins with fastai's AdaptiveConcatPool2d if concat_pool=True; otherwise, it uses traditional average pooling. 
+        fc_dropout:         dropout applied to the final fully connected layer.
+        use_bn:             flag that indicates if batchnorm will be applied to the head.
+        bias_init:          values used to initialize the output layer.
+        y_range:            range of possible y values (used in regression tasks).        
+        custom_head:        custom head that will be applied to the network. It must contain all kwargs (pass a partial function)
+        verbose:            flag to control verbosity of the model.
+
+    Input:
+        x: bs (batch size) x nvars (aka features, variables, dimensions, channels) x seq_len (aka time steps)
+    """
+    
+    def __init__(self, c_in:int, c_out:int, seq_len:int, d_model:int=128, depth:int=6, act:str='gelu',
+                 lstm_dropout:float=0., dropout:float=0., drop_path_rate:float=0., mlp_ratio:int=1, lstm_bias:bool=True, 
+                 pre_norm:bool=False, use_token:bool=False, use_pe:bool=True, 
+                 cat_pos:Optional[list]=None, n_cat_embeds:Optional[list]=None, cat_embed_dims:Optional[list]=None, cat_padding_idxs:Optional[list]=None,
+                 token_size:int=None, tokenizer:Optional[Callable]=None, feature_extractor:Optional[Callable]=None, 
+                 flatten:bool=False, concat_pool:bool=True, fc_dropout:float=0., use_bn:bool=False, 
+                 bias_init:Optional[Union[float, list]]=None, y_range:Optional[tuple]=None, custom_head:Optional[Callable]=None, verbose:bool=True,
+                 **kwargs):
+
+        if use_token and c_out == 1: 
+            use_token = False
+            pv("use_token set to False as c_out == 1", verbose)
+        backbone = _TSSequencerBackbone(c_in, seq_len, depth=depth, d_model=d_model, act=act,
+                                      lstm_dropout=lstm_dropout, dropout=dropout, drop_path_rate=drop_path_rate, 
+                                      pre_norm=pre_norm, mlp_ratio=mlp_ratio, use_pe=use_pe, use_token=use_token, 
+                                      n_cat_embeds=n_cat_embeds, cat_embed_dims=cat_embed_dims, cat_padding_idxs=cat_padding_idxs, cat_pos=cat_pos, 
+                                      feature_extractor=feature_extractor, token_size=token_size, tokenizer=tokenizer)
+
+        self.head_nf = d_model
+        self.c_out = c_out
+        self.seq_len = seq_len
+
+        # Head
+        if custom_head:
+            if isinstance(custom_head, nn.Module): head = custom_head
+            else: head = custom_head(self.head_nf, c_out, seq_len, **kwargs)
+        else:
+            nf = d_model
+            layers = []
+            if use_token: 
+                layers += [TokenLayer()]
+            elif flatten:
+                layers += [Reshape(-1)]
+                nf = nf * seq_len
+            else:
+                if concat_pool: nf *= 2
+                layers = [GACP1d(1) if concat_pool else GAP1d(1)]
+            if use_bn: layers += [nn.BatchNorm1d(nf)]
+            if fc_dropout: layers += [nn.Dropout(fc_dropout)]
+            
+            # Last layer
+            linear = nn.Linear(nf, c_out)
+            if bias_init is not None: 
+                if isinstance(bias_init, float): nn.init.constant_(linear.bias, bias_init)
+                else: linear.bias = nn.Parameter(torch.as_tensor(bias_init, dtype=torch.float32))
+            layers += [linear]
+
+            if y_range: layers += [SigmoidRange(*y_range)]
+            head = nn.Sequential(*layers)
+        super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))
+        
+        
+TSSequencer = TSSequencerPlus
+
+
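As the head-building code above shows, the default head pools over time (concat pooling by default), use_token=True takes the class token, and flatten=True reshapes the whole sequence before the final linear layer. A quick, illustrative check (a sketch, not from the tsai docs) that these head configurations all map to the same (bs, c_out) output:

xb = torch.rand(8, 3, 60)
for kwargs in [dict(), dict(use_token=True), dict(flatten=True), dict(concat_pool=False)]:
    model = TSSequencerPlus(3, 2, 60, depth=2, **kwargs)
    test_eq(model(xb).shape, (8, 2))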
+

source

+
+

TSSequencerPlus

+
+
 TSSequencerPlus (c_in:int, c_out:int, seq_len:int, d_model:int=128,
+                  depth:int=6, act:str='gelu', lstm_dropout:float=0.0,
+                  dropout:float=0.0, drop_path_rate:float=0.0,
+                  mlp_ratio:int=1, lstm_bias:bool=True,
+                  pre_norm:bool=False, use_token:bool=False,
+                  use_pe:bool=True, cat_pos:Optional[list]=None,
+                  n_cat_embeds:Optional[list]=None,
+                  cat_embed_dims:Optional[list]=None,
+                  cat_padding_idxs:Optional[list]=None,
+                  token_size:int=None, tokenizer:Optional[Callable]=None,
+                  feature_extractor:Optional[Callable]=None,
+                  flatten:bool=False, concat_pool:bool=True,
+                  fc_dropout:float=0.0, use_bn:bool=False,
+                  bias_init:Union[float,list,NoneType]=None,
+                  y_range:Optional[tuple]=None,
+                  custom_head:Optional[Callable]=None, verbose:bool=True,
+                  **kwargs)
+
+

Time Series Sequencer model based on:

+

Tatsunami, Y., & Taki, M. (2022). Sequencer: Deep LSTM for Image Classification. arXiv preprint arXiv:2205.01972. Official implementation: https://github.com/okojoalg/sequencer

+

Args:
• c_in: the number of features (aka variables, dimensions, channels) in the time series dataset.
• c_out: the number of target classes.
• seq_len: number of time steps in the time series.
• d_model: total dimension of the model (number of features created by the model).
• depth: number of blocks in the encoder.
• act: the activation function of the positionwise feedforward layer.
• lstm_dropout: dropout rate applied to the lstm sublayer.
• dropout: dropout applied to the embedded sequence steps after position embeddings have been added, and to the mlp sublayer in the encoder.
• drop_path_rate: stochastic depth rate.
• mlp_ratio: ratio of mlp hidden dim to embedding dim.
• lstm_bias: determines whether bias is applied to the LSTM layer.
• pre_norm: if True, normalization will be applied as the first step in the sublayers. Defaults to False.
• use_token: if True, the output will come from the transformed token. This is meant to be used in classification tasks.
• use_pe: flag to indicate if positional embedding is used.
• n_cat_embeds: list with the sizes of the dictionaries of embeddings (int).
• cat_embed_dims: list with the sizes of each embedding vector (int).
• cat_padding_idxs: if specified, the entries at cat_padding_idxs do not contribute to the gradient; therefore, the embedding vectors at cat_padding_idxs are not updated during training. Use 0 for those categorical embeddings that may have #na# values. Otherwise, leave them as None. You can enter a combination for different embeddings (for example, [0, None, None]).
• cat_pos: list with the position of the categorical variables in the input.
• token_size: size of the embedding function used to reduce the sequence length (similar to ViT’s patch size).
• tokenizer: nn.Module or callable that will be used to reduce the sequence length.
• feature_extractor: nn.Module or callable that will be used to preprocess the time series before the embedding step. It is useful to extract features or resample the time series.
• flatten: flag to indicate if the 3d logits will be flattened to 2d in the model’s head if use_token is set to False. If use_token is False and flatten is False, the model will apply a pooling layer.
• concat_pool: if True, the head begins with fastai’s AdaptiveConcatPool2d; otherwise, it uses traditional average pooling.
• fc_dropout: dropout applied to the final fully connected layer.
• use_bn: flag that indicates if batchnorm will be applied to the head.
• bias_init: values used to initialize the output layer.
• y_range: range of possible y values (used in regression tasks).
• custom_head: custom head that will be applied to the network. It must contain all kwargs (pass a partial function).
• verbose: flag to control verbosity of the model.

+

Input: x: bs (batch size) x nvars (aka features, variables, dimensions, channels) x seq_len (aka time steps)

+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+model = TSSequencerPlus(nvars, c_out, seq_len)
+
+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+model = TSSequencerPlus(nvars, c_out, seq_len, lstm_dropout=.1, dropout=.1, use_token=True)
+test_eq(model(xb).shape, (bs, c_out))
+model = TSSequencerPlus(nvars, c_out, seq_len, lstm_dropout=.1, dropout=.1, use_token=False)
+test_eq(model(xb).shape, (bs, c_out))
+
+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+bias_init = np.array([0.8, .2])
+model = TSSequencerPlus(nvars, c_out, seq_len, bias_init=bias_init)
+test_eq(model(xb).shape, (bs, c_out))
+test_eq(model.head[1].bias.data, tensor(bias_init))
+
+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 1
+xb = torch.rand(bs, nvars, seq_len)
+bias_init = 8.5
+model = TSSequencerPlus(nvars, c_out, seq_len, bias_init=bias_init)
+test_eq(model(xb).shape, (bs, c_out))
+test_eq(model.head[1].bias.data, tensor([bias_init]))
+
+
+
bs = 16
+nvars = 4
+seq_len = 50
+c_out = 2
+xb = torch.rand(bs, nvars, seq_len)
+bias_init = np.array([0.8, .2])
+model = TSSequencerPlus(nvars, c_out, seq_len, bias_init=bias_init)
+test_eq(model(xb).shape, (bs, c_out))
+test_eq(model.head[1].bias.data, tensor(bias_init))
+
+
+
+

Feature extractor

+

Transformers cannot be applied directly to very long sequences without running into memory and compute limits. To avoid this, we have included a way to subsample the sequence and generate a more manageable input.

+
+
from tsai.data.validation import get_splits
+from tsai.data.core import get_ts_dls
+
+
+
X = np.zeros((10, 3, 5000)) 
+y = np.random.randint(0,2,X.shape[0])
+splits = get_splits(y)
+dls = get_ts_dls(X, y, splits=splits)
+xb, yb = dls.train.one_batch()
+xb
+
+
+
+

+
+
+
+
+
TSTensor(samples:8, vars:3, len:5000, device=cpu, dtype=torch.float32)
+
+
+

If you try to use TSSequencerPlus, it’s likely you’ll get an ‘out-of-memory’ error.

+

To avoid this you can subsample the sequence reducing the input’s length. This can be done in multiple ways. Here are a few examples:

+
+
# Separable convolution (to avoid mixing channels)
+feature_extractor = Conv1d(xb.shape[1], xb.shape[1], ks=100, stride=50, padding=0, groups=xb.shape[1]).to(default_device())
+feature_extractor.to(xb.device)(xb).shape
+
+
torch.Size([8, 3, 99])
+
+
+
+
# Convolution (if you want to mix channels or change number of channels)
+feature_extractor=MultiConv1d(xb.shape[1], 64, kss=[1,3,5,7,9], keep_original=True).to(default_device())
+test_eq(feature_extractor.to(xb.device)(xb).shape, (xb.shape[0], 64, xb.shape[-1]))
+
+
+
# MaxPool
+feature_extractor = nn.Sequential(Pad1d((0, 50), 0), nn.MaxPool1d(kernel_size=100, stride=50)).to(default_device())
+feature_extractor.to(xb.device)(xb).shape
+
+
torch.Size([8, 3, 100])
+
+
+
+
# AvgPool
+feature_extractor = nn.Sequential(Pad1d((0, 50), 0), nn.AvgPool1d(kernel_size=100, stride=50)).to(default_device())
+feature_extractor.to(xb.device)(xb).shape
+
+
torch.Size([8, 3, 100])
+
+
+

Once you decide what type of transform you want to apply, you just need to pass the layer as the feature_extractor attribute:

+
+
bs = 16
+nvars = 4
+seq_len = 1000
+c_out = 2
+d_model = 128
+
+xb = torch.rand(bs, nvars, seq_len)
+feature_extractor = partial(Conv1d, ks=5, stride=3, padding=0, groups=xb.shape[1])
+model = TSSequencerPlus(nvars, c_out, seq_len, d_model=d_model, feature_extractor=feature_extractor)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+
+
+
+

Categorical variables

+
+
from tsai.utils import alphabet, ALPHABET
+
+
+
a = alphabet[np.random.randint(0,3,40)]
+b = ALPHABET[np.random.randint(6,10,40)]
+c = np.random.rand(40).reshape(4,1,10)
+map_a = {k:v for v,k in enumerate(np.unique(a))}
+map_b = {k:v for v,k in enumerate(np.unique(b))}
+n_cat_embeds = [len(m.keys()) for m in [map_a, map_b]]
+szs = [emb_sz_rule(n) for n in n_cat_embeds]
+a = np.asarray(a.map(map_a)).reshape(4,1,10)
+b = np.asarray(b.map(map_b)).reshape(4,1,10)
+inp = torch.from_numpy(np.concatenate((c,a,b), 1)).float()
+feature_extractor = partial(Conv1d, ks=3, padding='same')
+model = TSSequencerPlus(3, 2, 10, d_model=64, cat_pos=[1,2], feature_extractor=feature_extractor)
+test_eq(model(inp).shape, (4,2))
+
+
[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.
+
+
+
+
+

Sequence Embedding

+

Sometimes your samples have a very long sequence length. In those cases you may want to reduce their length before passing them to the transformer. To do that you can just pass a token_size, as in this example (a token_size of 60 turns the 10,080-step series below into 10080 / 60 = 168 embedded steps):

+
+
t = torch.rand(8, 2, 10080)
+SeqTokenizer(2, 128, 60)(t).shape
+
+
torch.Size([8, 128, 168])
+
+
+
+
t = torch.rand(8, 2, 10080)
+model = TSSequencerPlus(2, 5, 10080, d_model=64, token_size=60)
+model(t).shape
+
+
torch.Size([8, 5])
+
+
\ No newline at end of file
diff --git a/models.tst.html b/models.tst.html new file mode 100644 index 000000000..cc834456f --- /dev/null +++ b/models.tst.html @@ -0,0 +1,1389 @@
+tsai - TST
+
+
+

TST

+

This is an unofficial PyTorch implementation by Ignacio Oguiza (oguiza@timeseriesAI.co) based on: * George Zerveas et al. A Transformer-based Framework for Multivariate Time Series Representation Learning, in Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD ’21), August 14–18, 2021. ArXiV version: https://arxiv.org/abs/2010.02803 * Official implementation: https://github.com/gzerveas/mvts_transformer

+
@inproceedings{10.1145/3447548.3467401,
+author = {Zerveas, George and Jayaraman, Srideepika and Patel, Dhaval and Bhamidipaty, Anuradha and Eickhoff, Carsten},
+title = {A Transformer-Based Framework for Multivariate Time Series Representation Learning},
+year = {2021},
+isbn = {9781450383325},
+publisher = {Association for Computing Machinery},
+address = {New York, NY, USA},
+url = {https://doi.org/10.1145/3447548.3467401},
+doi = {10.1145/3447548.3467401},
+booktitle = {Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery &amp; Data Mining},
+pages = {2114–2124},
+numpages = {11},
+keywords = {regression, framework, multivariate time series, classification, transformer, deep learning, self-supervised learning, unsupervised learning, imputation},
+location = {Virtual Event, Singapore},
+series = {KDD '21}
+}
+

This paper uses ‘Attention is all you need’ as a major reference: * Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., … & Polosukhin, I. (2017). Attention is all you need. In Advances in neural information processing systems (pp. 5998-6008).

+

This implementation is adapted to work with the rest of the tsai library, and contains some hyperparameters that are not available in the original implementation. They are included so you can experiment with them.

+
+

TST arguments

+

Usual values are the ones that appear in the “Attention is all you need” and “A Transformer-based Framework for Multivariate Time Series Representation Learning” papers.

+

The default values are the ones selected as a default configuration in the latter.

+
• c_in: the number of features (aka variables, dimensions, channels) in the time series dataset. dls.vars
• c_out: the number of target classes. dls.c
• seq_len: number of time steps in the time series. dls.len
• max_seq_len: useful to control the temporal resolution in long time series to avoid memory issues. Default: None.
• d_model: total dimension of the model (number of features created by the model). Usual values: 128-1024. Default: 128.
• n_heads: parallel attention heads. Usual values: 8-16. Default: 16.
• d_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> d_model/n_heads (8 with the defaults above).
• d_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> d_model/n_heads (8 with the defaults above).
• d_ff: the dimension of the feedforward network model. Usual values: 256-4096. Default: 256.
• dropout: amount of residual dropout applied in the encoder. Usual values: 0.-0.3. Default: 0.1.
• activation: the activation function of the intermediate layer, relu or gelu. Default: ‘gelu’.
• n_layers: the number of sub-encoder-layers in the encoder. Usual values: 2-8. Default: 3.
• fc_dropout: dropout applied to the final fully connected layer. Usual values: 0.-0.8. Default: 0.
• y_range: range of possible y values (used in regression tasks). Default: None.
• kwargs: nn.Conv1d kwargs. If not {}, a nn.Conv1d with those kwargs will be applied to the original time series.
+
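As a quick illustration of the arguments above, here is a minimal sketch (values chosen arbitrarily, and it assumes the TST class defined below has been imported). d_k and d_v are left as None, so they default to d_model // n_heads:

xb = torch.randn(16, 7, 100)  # (batch size, c_in, seq_len)
model = TST(c_in=7, c_out=3, seq_len=100, d_model=128, n_heads=16,
            d_k=None, d_v=None, d_ff=256, dropout=0.1, n_layers=3, fc_dropout=0.1)
model(xb).shape  # expected: torch.Size([16, 3])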
+
+

Imports

+
+
+

TST

+
+
t = torch.rand(16, 50, 128)
+output, attn = _MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t)
+output.shape, attn.shape
+
+
(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))
+
+
+
+
t = torch.rand(16, 50, 128)
+output = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=3, d_k=None, d_v=None, d_ff=512, dropout=0.1, activation='gelu')(t)
+output.shape
+
+
torch.Size([16, 50, 128])
+
+
+
+

source

+
+

TST

+
+
 TST (c_in:int, c_out:int, seq_len:int, max_seq_len:Optional[int]=None,
+      n_layers:int=3, d_model:int=128, n_heads:int=16,
+      d_k:Optional[int]=None, d_v:Optional[int]=None, d_ff:int=256,
+      dropout:float=0.1, act:str='gelu', fc_dropout:float=0.0,
+      y_range:Optional[tuple]=None, verbose:bool=False, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 5000
+
+xb = torch.randn(bs, c_in, seq_len)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+# Settings
+max_seq_len = 256
+d_model = 128
+n_heads = 16
+d_k = d_v = None # if None --> d_model // n_heads
+d_ff = 256
+dropout = 0.1
+activation = "gelu"
+n_layers = 3
+fc_dropout = 0.1
+kwargs = {}
+
+model = TST(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, activation=activation, n_layers=n_layers,
+            fc_dropout=fc_dropout, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 517378
+
+
+
+
bs = 32
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 60
+
+xb = torch.randn(bs, c_in, seq_len)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+# Settings
+max_seq_len = 120
+d_model = 128
+n_heads = 16
+d_k = d_v = None # if None --> d_model // n_heads
+d_ff = 256
+dropout = 0.1
+act = "gelu"
+n_layers = 3
+fc_dropout = 0.1
+kwargs = {}
+# kwargs = dict(kernel_size=5, padding=2)
+
+model = TST(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, act=act, n_layers=n_layers,
+            fc_dropout=fc_dropout, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 420226
+
+
\ No newline at end of file
diff --git a/models.tstplus.html b/models.tstplus.html
new file mode 100644
index 000000000..c3d66a36d
--- /dev/null
+++ b/models.tstplus.html
@@ -0,0 +1,1725 @@
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

TSTPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial PyTorch implementation by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:

+ +
@inproceedings{10.1145/3447548.3467401,
+author = {Zerveas, George and Jayaraman, Srideepika and Patel, Dhaval and Bhamidipaty, Anuradha and Eickhoff, Carsten},
+title = {A Transformer-Based Framework for Multivariate Time Series Representation Learning},
+year = {2021},
+isbn = {9781450383325},
+publisher = {Association for Computing Machinery},
+address = {New York, NY, USA},
+url = {https://doi.org/10.1145/3447548.3467401},
+doi = {10.1145/3447548.3467401},
+booktitle = {Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery &amp; Data Mining},
+pages = {2114–2124},
+numpages = {11},
+keywords = {regression, framework, multivariate time series, classification, transformer, deep learning, self-supervised learning, unsupervised learning, imputation},
+location = {Virtual Event, Singapore},
+series = {KDD '21}
+}
+ +

This implementation is adapted to work with the rest of the tsai library, and contains some hyperparameters that are not available in the original implementation. I included them for experimentation.

+
+

Imports

+
+
+

TST

+
+
t = torch.rand(16, 50, 128)
+attn_mask = torch.triu(torch.ones(50, 50)) # shape: q_len x q_len
+key_padding_mask = torch.zeros(16, 50)
+key_padding_mask[[1, 3, 6, 15], -10:] = 1
+key_padding_mask = key_padding_mask.bool()
+print('attn_mask', attn_mask.shape, 'key_padding_mask', key_padding_mask.shape)
+encoder = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=8, d_k=None, d_v=None, d_ff=512, attn_dropout=0., dropout=0.1, store_attn=True, activation='gelu')
+output = encoder(t, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
+output.shape
+
+
attn_mask torch.Size([50, 50]) key_padding_mask torch.Size([16, 50])
+
+
+
torch.Size([16, 50, 128])
+
+
+
+
cmap='viridis'
+figsize=(6,5)
+plt.figure(figsize=figsize)
+plt.pcolormesh(encoder.attn[0][0].detach().cpu().numpy(), cmap=cmap)
+plt.title('Self-attention map')
+plt.colorbar()
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+

TSTPlus

+
+
 TSTPlus (c_in:int, c_out:int, seq_len:int, max_seq_len:Optional[int]=512,
+          n_layers:int=3, d_model:int=128, n_heads:int=16,
+          d_k:Optional[int]=None, d_v:Optional[int]=None, d_ff:int=256,
+          norm:str='BatchNorm', attn_dropout:float=0.0, dropout:float=0.0,
+          act:str='gelu', key_padding_mask:bool='auto',
+          padding_var:Optional[int]=None,
+          attn_mask:Optional[torch.Tensor]=None, res_attention:bool=True,
+          pre_norm:bool=False, store_attn:bool=False, pe:str='zeros',
+          learn_pe:bool=True, flatten:bool=True, fc_dropout:float=0.0,
+          concat_pool:bool=False, bn:bool=False,
+          custom_head:Optional[Callable]=None,
+          y_range:Optional[tuple]=None, verbose:bool=False, **kwargs)
+
+

TST (Time Series Transformer) is a Transformer that takes continuous time series as inputs

+
+
from tsai.models.utils import build_ts_model
+
+
+
bs = 8
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 1_500
+
+xb = torch.randn(bs, c_in, seq_len).to(device)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+# Settings
+max_seq_len = 256
+d_model = 128
+n_heads = 16
+d_k = d_v = None  # if None --> d_model // n_heads
+d_ff = 256
+norm = "BatchNorm"
+dropout = 0.1
+activation = "gelu"
+n_layers = 3
+fc_dropout = 0.1
+pe = None
+learn_pe = True
+kwargs = {}
+
+model = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,
+                d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm, dropout=dropout, activation=activation, n_layers=n_layers,
+                fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs).to(device)
+test_eq(model(xb).shape, [bs, c_out])
+test_eq(model[0], model.backbone)
+test_eq(model[1], model.head)
+model2 = build_ts_model(TSTPlus, c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,
+                           d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm, dropout=dropout, activation=activation, n_layers=n_layers,
+                           fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs).to(device)
+test_eq(model2(xb).shape, [bs, c_out])
+test_eq(model2[0], model2.backbone)
+test_eq(model2[1], model2.head)
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 470018
+
+
+
+
key_padding_mask = torch.sort(torch.randint(0, 2, (bs, max_seq_len))).values.bool().to(device)
+key_padding_mask[0]
+
+
tensor([False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False, False,
+        False, False, False, False, False, False, False, False, False,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True,  True,  True,  True,  True,
+         True,  True,  True,  True,  True,  True])
+
+
+
+
model2.key_padding_mask = True
+model2.to(device)((xb, key_padding_mask)).shape
+
+
torch.Size([8, 2])
+
+
+
+
model.head
+
+
Sequential(
+  (0): GELU(approximate='none')
+  (1): fastai.layers.Flatten(full=False)
+  (2): LinBnDrop(
+    (0): Dropout(p=0.1, inplace=False)
+    (1): Linear(in_features=32768, out_features=2, bias=True)
+  )
+)
+
+
+
+
model = TSTPlus(c_in, c_out, seq_len, pre_norm=True)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+
+
+
bs = 8
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 5000
+
+xb = torch.randn(bs, c_in, seq_len)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+model = TSTPlus(c_in, c_out, seq_len, res_attention=True)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 605698
+
+
+
+
custom_head = partial(create_pool_head, concat_pool=True)
+model = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, activation=activation, n_layers=n_layers,
+            fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, flatten=False, custom_head=custom_head, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 421122
+
+
+
+
custom_head = partial(create_pool_plus_head, concat_pool=True)
+model = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, activation=activation, n_layers=n_layers,
+            fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, flatten=False, custom_head=custom_head, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+
+
model parameters: 554240
+
+
+
+
bs = 8
+c_in = 9  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 60
+
+xb = torch.randn(bs, c_in, seq_len)
+
+# standardize by channel by_var based on the training set
+xb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)
+
+# Settings
+max_seq_len = 120
+d_model = 128
+n_heads = 16
+d_k = d_v = None # if None --> d_model // n_heads
+d_ff = 256
+dropout = 0.1
+act = "gelu"
+n_layers = 3
+fc_dropout = 0.1
+pe='zeros'
+learn_pe=True
+kwargs = {}
+# kwargs = dict(kernel_size=5, padding=2)
+
+model = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,
+            d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, act=act, n_layers=n_layers,
+            fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs)
+test_eq(model.to(xb.device)(xb).shape, [bs, c_out])
+print(f'model parameters: {count_parameters(model)}')
+body, head = model[0], model[1]
+test_eq(body.to(xb.device)(xb).ndim, 3)
+test_eq(head.to(xb.device)(body.to(xb.device)(xb)).ndim, 2)
+head
+
+
model parameters: 421762
+
+
+
Sequential(
+  (0): GELU(approximate='none')
+  (1): fastai.layers.Flatten(full=False)
+  (2): LinBnDrop(
+    (0): Dropout(p=0.1, inplace=False)
+    (1): Linear(in_features=7680, out_features=2, bias=True)
+  )
+)
+
+
+
+
model.show_pe()
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
model = TSTPlus(3, 2, 10)
+xb = torch.randn(4, 3, 10)
+yb = torch.randint(0, 2, (4,))
+test_eq(model.backbone._key_padding_mask(xb)[1], None)
+random_idxs = random_choice(len(xb), 2, False)
+xb[random_idxs, :, -5:] = np.nan
+xb[random_idxs, 0, 1] = np.nan
+test_eq(model.backbone._key_padding_mask(xb.clone())[1].data, (torch.isnan(xb).float().mean(1)==1).bool())
+test_eq(model.backbone._key_padding_mask(xb.clone())[1].data.shape, (4,10))
+print(torch.isnan(xb).sum())
+pred = model.to(xb.device)(xb.clone())
+loss = CrossEntropyLossFlat()(pred, yb)
+loss.backward()
+model.to(xb.device).backbone._key_padding_mask(xb)[1].data.shape
+
+
tensor(32)
+
+
+
torch.Size([4, 10])
+
+
+
+
bs = 4
+c_in = 3
+seq_len = 10
+c_out = 2
+xb = torch.randn(bs, c_in, seq_len)
+xb[:, -1] = torch.randint(0, 2, (bs, seq_len)).sort()[0]
+model = TSTPlus(c_in, c_out, seq_len).to(xb.device)
+test_eq(model.backbone._key_padding_mask(xb)[1], None)
+model = TSTPlus(c_in, c_out, seq_len, padding_var=-1).to(xb.device)
+test_eq(model.backbone._key_padding_mask(xb)[1], (xb[:, -1]==1))
+model = TSTPlus(c_in, c_out, seq_len, padding_var=2).to(xb.device)
+test_eq(model.backbone._key_padding_mask(xb)[1], (xb[:, -1]==1))
+test_eq(model(xb).shape, (bs, c_out))
+
+
+
bs = 4
+c_in = 3
+seq_len = 10
+c_out = 2
+xb = torch.randn(bs, c_in, seq_len)
+model = TSTPlus(c_in, c_out, seq_len, act='smelu')
+
+
+

source

+
+
+

MultiTSTPlus

+
+
 MultiTSTPlus (feat_list, c_out, seq_len, max_seq_len:Optional[int]=512,
+               custom_head=None, n_layers:int=3, d_model:int=128,
+               n_heads:int=16, d_k:Optional[int]=None,
+               d_v:Optional[int]=None, d_ff:int=256, norm:str='BatchNorm',
+               attn_dropout:float=0.0, dropout:float=0.0, act:str='gelu',
+               key_padding_mask:bool='auto',
+               padding_var:Optional[int]=None,
+               attn_mask:Optional[torch.Tensor]=None,
+               res_attention:bool=True, pre_norm:bool=False,
+               store_attn:bool=False, pe:str='zeros', learn_pe:bool=True,
+               flatten:bool=True, fc_dropout:float=0.0,
+               concat_pool:bool=False, bn:bool=False,
+               y_range:Optional[tuple]=None, verbose:bool=False)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
bs = 8
+c_in = 7  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 10
+xb2 = torch.randn(bs, c_in, seq_len)
+model1 = MultiTSTPlus([2, 5], c_out, seq_len)
+model2 = MultiTSTPlus(7, c_out, seq_len)
+test_eq(model1.to(xb2.device)(xb2).shape, (bs, c_out))
+test_eq(model1.to(xb2.device)(xb2).shape, model2.to(xb2.device)(xb2).shape)
+test_eq(count_parameters(model1) > count_parameters(model2), True)
+
+
+
bs = 8
+c_in = 7  # aka channels, features, variables, dimensions
+c_out = 2
+seq_len = 10
+xb2 = torch.randn(bs, c_in, seq_len)
+model1 = MultiTSTPlus([2, 5], c_out, seq_len, )
+model2 = MultiTSTPlus([[0,2,5], [0,1,3,4,6]], c_out, seq_len)
+test_eq(model1.to(xb2.device)(xb2).shape, (bs, c_out))
+test_eq(model1.to(xb2.device)(xb2).shape, model2.to(xb2.device)(xb2).shape)
+
+
+
model1 = MultiTSTPlus([2, 5], c_out, seq_len, y_range=(0.5, 5.5))
+body, head = split_model(model1)
+test_eq(body.to(xb2.device)(xb2).ndim, 3)
+test_eq(head.to(xb2.device)(body.to(xb2.device)(xb2)).ndim, 2)
+head
+
+
Sequential(
+  (0): Sequential(
+    (0): GELU(approximate='none')
+    (1): fastai.layers.Flatten(full=False)
+    (2): LinBnDrop(
+      (0): Linear(in_features=2560, out_features=2, bias=True)
+    )
+  )
+)
+
+
+
+
model = MultiTSTPlus([2, 5], c_out, seq_len, pre_norm=True)
+
+
+
bs = 8
+n_vars = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+net = MultiTSTPlus(n_vars, c_out, seq_len)
+change_model_head(net, create_pool_plus_head, concat_pool=False)
+print(net.to(xb.device)(xb).shape)
+net.head
+
+
torch.Size([8, 2])
+
+
+
Sequential(
+  (0): AdaptiveAvgPool1d(output_size=1)
+  (1): Reshape(bs)
+  (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (3): Linear(in_features=128, out_features=512, bias=False)
+  (4): ReLU(inplace=True)
+  (5): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (6): Linear(in_features=512, out_features=2, bias=False)
+)
+
+
+
+
bs = 8
+n_vars = 3
+seq_len = 12
+c_out = 10
+xb = torch.rand(bs, n_vars, seq_len)
+new_head = partial(conv_lin_nd_head, d=(5 ,2))
+net = MultiTSTPlus(n_vars, c_out, seq_len, custom_head=new_head)
+print(net.to(xb.device)(xb).shape)
+net.head
+
+
torch.Size([8, 5, 2, 10])
+
+
+
Sequential(
+  (0): create_conv_lin_nd_head(
+    (0): Conv1d(128, 10, kernel_size=(1,), stride=(1,))
+    (1): Linear(in_features=12, out_features=10, bias=True)
+    (2): Transpose(-1, -2)
+    (3): Reshape(bs, 5, 2, 10)
+  )
+)
+
+
\ No newline at end of file
diff --git a/models.utils.html b/models.utils.html
new file mode 100644
index 000000000..bf55c0c5f
--- /dev/null
+++ b/models.utils.html
@@ -0,0 +1,1567 @@
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Model utilities

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Utility functions used to build PyTorch timeseries models.

+
+
+

source

+
+

apply_idxs

+
+
 apply_idxs (o, idxs)
+
+

Function to apply indices to zarr, dask and numpy arrays

+
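As a hedged sketch, for an in-memory numpy array this is expected to behave like plain fancy indexing (zarr and dask arrays are passed the same way):

import numpy as np
a = np.arange(10)
apply_idxs(a, [0, 2, 4])  # for a numpy array this should be equivalent to a[[0, 2, 4]]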
+

source

+
+
+

SeqTokenizer

+
+
 SeqTokenizer (c_in, embed_dim, token_size=60, norm=False)
+
+

Generates non-overlapping tokens from sub-sequences within a sequence by applying a sliding window

+
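The example below mirrors the “Sequence Embedding” section earlier in this document: a long sequence (10080 steps, 2 channels) is split into non-overlapping 60-step tokens, each embedded into 128 dimensions:

t = torch.rand(8, 2, 10080)
SeqTokenizer(2, 128, 60)(t).shape  # torch.Size([8, 128, 168])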
+

source

+
+
+

get_embed_size

+
+
 get_embed_size (n_cat, rule='log2')
+
+
+
test_eq(get_embed_size(35), 6)
+
+
+

source

+
+
+

has_weight_or_bias

+
+
 has_weight_or_bias (l)
+
+
+

source

+
+
+

has_weight

+
+
 has_weight (l)
+
+
+

source

+
+
+

has_bias

+
+
 has_bias (l)
+
+
+

source

+
+
+

is_conv

+
+
 is_conv (l)
+
+
+

source

+
+
+

is_affine_layer

+
+
 is_affine_layer (l)
+
+
+

source

+
+
+

is_conv_linear

+
+
 is_conv_linear (l)
+
+
+

source

+
+
+

is_bn

+
+
 is_bn (l)
+
+
+

source

+
+
+

is_linear

+
+
 is_linear (l)
+
+
+

source

+
+
+

is_layer

+
+
 is_layer (*args)
+
+
+

source

+
+
+

get_layers

+
+
 get_layers (model, cond=<function noop>, full=True)
+
+
+

source

+
+
+

check_weight

+
+
 check_weight (m, cond=<function noop>, verbose=False)
+
+
+

source

+
+
+

check_bias

+
+
 check_bias (m, cond=<function noop>, verbose=False)
+
+
+
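The layer predicates above can be combined with get_layers, check_weight and check_bias to inspect a model. A small sketch on a plain nn.Sequential (usage mirrors the XceptionTime tests later in this document; the exact return values depend on the model):

m = nn.Sequential(nn.Conv1d(3, 4, 3), nn.BatchNorm1d(4), nn.Flatten(), nn.Linear(40, 2))
len(get_layers(m, cond=is_conv))  # conv layers found anywhere in the model
check_weight(m, cond=is_bn)       # weights (and layer names) of batchnorm layers
check_bias(m, cond=is_linear)     # biases (and layer names) of linear layers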

source

+
+
+

get_nf

+
+
 get_nf (m)
+
+

Get nf from model’s first linear layer in head

+
+

source

+
+
+

ts_splitter

+
+
 ts_splitter (m)
+
+

Split of a model between body and head

+
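A hedged usage sketch, assuming a model that exposes .backbone and .head attributes (such as TSTPlus, shown earlier in this document): the splitter returns the parameter groups a fastai Learner uses for discriminative learning rates.

m = TSTPlus(3, 2, 10)   # any tsai model with .backbone and .head
param_groups = ts_splitter(m)
len(param_groups)       # expected: 2 (backbone parameters, head parameters)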
+

source

+
+
+

transfer_weights

+
+
 transfer_weights (model, weights_path:pathlib.Path,
+                   device:torch.device=None, exclude_head:bool=True)
+
+

Utility function that makes it easy to transfer weights between models. Taken from the great self_supervised repository created by Kerem Turgutlu: https://github.com/KeremTurgutlu/self_supervised/blob/d87ebd9b4961c7da0efd6073c42782bbc61aaa2e/self_supervised/utils.py

+
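A minimal sketch of the intended workflow. The file name is hypothetical, the saved file is assumed to contain a plain state_dict, and TSTPlus is assumed to have been imported:

src = TSTPlus(3, 2, 100)
torch.save(src.state_dict(), 'pretrained_weights.pth')  # hypothetical path
tgt = TSTPlus(3, 10, 100)  # e.g. same backbone, different number of classes
transfer_weights(tgt, 'pretrained_weights.pth', exclude_head=True)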
+

source

+
+
+

build_ts_model

+
+
 build_ts_model (arch, c_in=None, c_out=None, seq_len=None, d=None,
+                 dls=None, device=None, verbose=False, s_cat_idxs=None,
+                 s_cat_embeddings=None, s_cat_embedding_dims=None,
+                 s_cont_idxs=None, o_cat_idxs=None, o_cat_embeddings=None,
+                 o_cat_embedding_dims=None, o_cont_idxs=None,
+                 patch_len=None, patch_stride=None, fusion_layers=128,
+                 fusion_act='relu', fusion_dropout=0.0,
+                 fusion_use_bn=True, pretrained=False, weights_path=None,
+                 exclude_head=True, cut=-1, init=None, arch_config={},
+                 **kwargs)
+
+
+

source

+
+
+

count_parameters

+
+
 count_parameters (model, trainable=True)
+
+
+

source

+
+
+

build_tsimage_model

+
+
 build_tsimage_model (arch, c_in=None, c_out=None, dls=None,
+                      pretrained=False, device=None, verbose=False,
+                      init=None, arch_config={}, **kwargs)
+
+
+

source

+
+
+

build_tabular_model

+
+
 build_tabular_model (arch, dls, layers=None, emb_szs=None, n_out=None,
+                      y_range=None, device=None, arch_config={}, **kwargs)
+
+
+
from tsai.data.external import get_UCR_data
+from tsai.data.core import TSCategorize, get_ts_dls
+from tsai.data.preprocessing import TSStandardize
+from tsai.models.InceptionTime import *
+
+
+
X, y, splits = get_UCR_data('NATOPS', split_data=False)
+tfms = [None, TSCategorize()]
+batch_tfms = TSStandardize()
+dls = get_ts_dls(X, y, splits, tfms=tfms, batch_tfms=batch_tfms)
+model = build_ts_model(InceptionTime, dls=dls)
+test_eq(count_parameters(model), 460038)
+
+
+

source

+
+
+

get_clones

+
+
 get_clones (module, N)
+
+
+
m = nn.Conv1d(3,4,3)
+get_clones(m, 3)
+
+
ModuleList(
+  (0-2): 3 x Conv1d(3, 4, kernel_size=(3,), stride=(1,))
+)
+
+
+
+

source

+
+
+

split_model

+
+
 split_model (m)
+
+
+

source

+
+
+

output_size_calculator

+
+
 output_size_calculator (mod, c_in, seq_len=None)
+
+
+
c_in = 3
+seq_len = 30
+m = nn.Conv1d(3, 12, kernel_size=3, stride=2)
+new_c_in, new_seq_len = output_size_calculator(m, c_in, seq_len)
+test_eq((new_c_in, new_seq_len), (12, 14))
+
+
[W NNPACK.cpp:64] Could not initialize NNPACK! Reason: Unsupported hardware.
+
+
+
+

source

+
+
+

change_model_head

+
+
 change_model_head (model, custom_head, **kwargs)
+
+

Replaces a model’s head by a custom head as long as the model has a head, head_nf, c_out and seq_len attributes

+
+
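A short usage sketch mirroring the MultiTSTPlus and XCM examples elsewhere in this document (it assumes MultiTSTPlus and create_pool_plus_head have been imported):

xb = torch.rand(8, 3, 12)
net = MultiTSTPlus(3, 2, 12)
change_model_head(net, create_pool_plus_head, concat_pool=False)
net.to(xb.device)(xb).shape  # expected: torch.Size([8, 2])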

source

+
+
+

true_forecaster

+
+
 true_forecaster (o, split, horizon=1)
+
+
+

source

+
+
+

naive_forecaster

+
+
 naive_forecaster (o, split, horizon=1)
+
+
+
a = np.random.rand(20).cumsum()
+split = np.arange(10, 20)
+a, naive_forecaster(a, split, 1), true_forecaster(a, split, 1)
+
+
(array([ 0.74775537,  1.41245663,  2.12445924,  2.8943163 ,  3.56384351,
+         4.23789602,  4.83134182,  5.18560431,  5.30551186,  6.29076506,
+         6.58873471,  7.03661275,  7.0884361 ,  7.57927022,  8.21911791,
+         8.59726773,  9.37382718, 10.17298849, 10.40118308, 10.82265631]),
+ array([ 6.29076506,  6.58873471,  7.03661275,  7.0884361 ,  7.57927022,
+         8.21911791,  8.59726773,  9.37382718, 10.17298849, 10.40118308]),
+ array([ 6.58873471,  7.03661275,  7.0884361 ,  7.57927022,  8.21911791,
+         8.59726773,  9.37382718, 10.17298849, 10.40118308, 10.82265631]))
+
+
\ No newline at end of file
diff --git a/models.xceptiontime.html b/models.xceptiontime.html
new file mode 100644
index 000000000..c4269394f
--- /dev/null
+++ b/models.xceptiontime.html
@@ -0,0 +1,1426 @@
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

XceptionTime

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial PyTorch implementation by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:

+

Fawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J. & Petitjean, F. (2019). InceptionTime: Finding AlexNet for Time Series Classification. arXiv preprint arXiv:1909.04939.

+

Official InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime

+
+

source

+
+

XceptionTime

+
+
 XceptionTime (c_in, c_out, nf=16, nb_filters=None, adaptive_size=50,
+               residual=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

XceptionBlock

+
+
 XceptionBlock (ni, nf, residual=True, ks=40, bottleneck=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

XceptionModule

+
+
 XceptionModule (ni, nf, ks=40, bottleneck=True)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 16
+vars = 3
+seq_len = 12
+c_out = 6
+xb = torch.rand(bs, vars, seq_len)
+test_eq(XceptionTime(vars,c_out)(xb).shape, [bs, c_out])
+test_eq(XceptionTime(vars,c_out, bottleneck=False)(xb).shape, [bs, c_out])
+test_eq(XceptionTime(vars,c_out, residual=False)(xb).shape, [bs, c_out])
+test_eq(count_parameters(XceptionTime(3, 2)), 399540)
+
+
+
m = XceptionTime(2,3)
+test_eq(check_weight(m, is_bn)[0].sum(), 5) # 2 shortcut + 3 bn
+test_eq(len(check_bias(m, is_conv)[0]), 0)
+test_eq(len(check_bias(m)[0]), 5) # 2 shortcut + 3 bn
+
+
+
XceptionTime(3, 2)
+
+
XceptionTime(
+  (block): XceptionBlock(
+    (xception): ModuleList(
+      (0): XceptionModule(
+        (bottleneck): Conv1d(3, 16, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): SeparableConv1d(
+            (depthwise_conv): Conv1d(16, 16, kernel_size=(39,), stride=(1,), padding=(19,), groups=16, bias=False)
+            (pointwise_conv): Conv1d(16, 16, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (1): SeparableConv1d(
+            (depthwise_conv): Conv1d(16, 16, kernel_size=(19,), stride=(1,), padding=(9,), groups=16, bias=False)
+            (pointwise_conv): Conv1d(16, 16, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (2): SeparableConv1d(
+            (depthwise_conv): Conv1d(16, 16, kernel_size=(9,), stride=(1,), padding=(4,), groups=16, bias=False)
+            (pointwise_conv): Conv1d(16, 16, kernel_size=(1,), stride=(1,), bias=False)
+          )
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(3, 16, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+      )
+      (1): XceptionModule(
+        (bottleneck): Conv1d(64, 32, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): SeparableConv1d(
+            (depthwise_conv): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), groups=32, bias=False)
+            (pointwise_conv): Conv1d(32, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (1): SeparableConv1d(
+            (depthwise_conv): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), groups=32, bias=False)
+            (pointwise_conv): Conv1d(32, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (2): SeparableConv1d(
+            (depthwise_conv): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), groups=32, bias=False)
+            (pointwise_conv): Conv1d(32, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(64, 32, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+      )
+      (2): XceptionModule(
+        (bottleneck): Conv1d(128, 64, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): SeparableConv1d(
+            (depthwise_conv): Conv1d(64, 64, kernel_size=(39,), stride=(1,), padding=(19,), groups=64, bias=False)
+            (pointwise_conv): Conv1d(64, 64, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (1): SeparableConv1d(
+            (depthwise_conv): Conv1d(64, 64, kernel_size=(19,), stride=(1,), padding=(9,), groups=64, bias=False)
+            (pointwise_conv): Conv1d(64, 64, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (2): SeparableConv1d(
+            (depthwise_conv): Conv1d(64, 64, kernel_size=(9,), stride=(1,), padding=(4,), groups=64, bias=False)
+            (pointwise_conv): Conv1d(64, 64, kernel_size=(1,), stride=(1,), bias=False)
+          )
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(128, 64, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+      )
+      (3): XceptionModule(
+        (bottleneck): Conv1d(256, 128, kernel_size=(1,), stride=(1,), bias=False)
+        (convs): ModuleList(
+          (0): SeparableConv1d(
+            (depthwise_conv): Conv1d(128, 128, kernel_size=(39,), stride=(1,), padding=(19,), groups=128, bias=False)
+            (pointwise_conv): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (1): SeparableConv1d(
+            (depthwise_conv): Conv1d(128, 128, kernel_size=(19,), stride=(1,), padding=(9,), groups=128, bias=False)
+            (pointwise_conv): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)
+          )
+          (2): SeparableConv1d(
+            (depthwise_conv): Conv1d(128, 128, kernel_size=(9,), stride=(1,), padding=(4,), groups=128, bias=False)
+            (pointwise_conv): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)
+          )
+        )
+        (maxconvpool): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): Conv1d(256, 128, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (concat): Concat(dim=1)
+      )
+    )
+    (shortcut): ModuleList(
+      (0): ConvBlock(
+        (0): Conv1d(3, 128, kernel_size=(1,), stride=(1,), bias=False)
+        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (1): ConvBlock(
+        (0): Conv1d(128, 512, kernel_size=(1,), stride=(1,), bias=False)
+        (1): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+    )
+    (add): Add
+    (act): ReLU()
+  )
+  (head): Sequential(
+    (0): AdaptiveAvgPool1d(output_size=50)
+    (1): ConvBlock(
+      (0): Conv1d(512, 256, kernel_size=(1,), stride=(1,), bias=False)
+      (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (2): ConvBlock(
+      (0): Conv1d(256, 128, kernel_size=(1,), stride=(1,), bias=False)
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (3): ConvBlock(
+      (0): Conv1d(128, 2, kernel_size=(1,), stride=(1,), bias=False)
+      (1): BatchNorm1d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (4): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Flatten(full=False)
+    )
+  )
+)
+
+
\ No newline at end of file
diff --git a/models.xceptiontimeplus.html b/models.xceptiontimeplus.html
new file mode 100644
index 000000000..ce5e3920d
--- /dev/null
+++ b/models.xceptiontimeplus.html
@@ -0,0 +1,1550 @@
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

XceptionTimePlus

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial PyTorch implementation by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:

+

Fawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J. & Petitjean, F. (2019). InceptionTime: Finding AlexNet for Time Series Classification. arXiv preprint arXiv:1909.04939.

+

Official InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime

+
+

source

+
+

XceptionTimePlus

+
+
 XceptionTimePlus (c_in, c_out, seq_len=None, nf=16, nb_filters=None,
+                   coord=False, norm='Batch', concat_pool=False,
+                   adaptive_size=50, custom_head=None, residual=True,
+                   zero_norm=False, act=<class
+                   'torch.nn.modules.activation.ReLU'>, act_kwargs={})
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

XceptionBlockPlus

+
+
 XceptionBlockPlus (ni, nf, residual=True, coord=False, norm='Batch',
+                    zero_norm=False, act=<class
+                    'torch.nn.modules.activation.ReLU'>, act_kwargs={},
+                    ks=40, kss=None, bottleneck=True, separable=True,
+                    bn_1st=True, norm_act=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+

source

+
+
+

XceptionModulePlus

+
+
 XceptionModulePlus (ni, nf, ks=40, kss=None, bottleneck=True,
+                     coord=False, separable=True, norm='Batch',
+                     zero_norm=False, bn_1st=True, act=<class
+                     'torch.nn.modules.activation.ReLU'>, act_kwargs={},
+                     norm_act=False)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
bs = 16
+vars = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, vars, seq_len)
+
+
+
test_eq(XceptionTimePlus(vars,c_out)(xb).shape, [bs, c_out])
+test_eq(XceptionTimePlus(vars,c_out, nf=32)(xb).shape, [bs, c_out])
+test_eq(XceptionTimePlus(vars,c_out, bottleneck=False)(xb).shape, [bs, c_out])
+test_eq(XceptionTimePlus(vars,c_out, residual=False)(xb).shape, [bs, c_out])
+test_eq(XceptionTimePlus(vars,c_out, coord=True)(xb).shape, [bs, c_out])
+test_eq(XceptionTimePlus(vars,c_out, concat_pool=True)(xb).shape, [bs, c_out])
+test_eq(count_parameters(XceptionTimePlus(3, 2)), 399540)
+
+
+
m = XceptionTimePlus(2,3)
+test_eq(check_weight(m, is_bn)[0].sum(), 5)
+test_eq(len(check_bias(m, is_conv)[0]), 0)
+m = XceptionTimePlus(2,3, zero_norm=True)
+test_eq(check_weight(m, is_bn)[0].sum(), 5)
+m = XceptionTimePlus(2,3, zero_norm=True, norm_act=True)
+test_eq(check_weight(m, is_bn)[0].sum(), 7)
+
+
+
m = XceptionTimePlus(2,3, coord=True)
+test_eq(len(get_layers(m, cond=is_layer(AddCoords1d))), 25)
+test_eq(len(get_layers(m, cond=is_layer(nn.Conv1d))), 37)
+m = XceptionTimePlus(2,3, bottleneck=False, coord=True)
+test_eq(len(get_layers(m, cond=is_layer(AddCoords1d))), 21)
+test_eq(len(get_layers(m, cond=is_layer(nn.Conv1d))), 33)
+
+
+
m = XceptionTimePlus(vars, c_out, seq_len=seq_len, custom_head=mlp_head)
+test_eq(m(xb).shape, [bs, c_out])
+
+
+
XceptionTimePlus(vars, c_out, coord=True)
+
+
XceptionTimePlus(
+  (backbone): XceptionBlockPlus(
+    (xception): ModuleList(
+      (0): XceptionModulePlus(
+        (bottleneck): ConvBlock(
+          (0): AddCoords1d()
+          (1): Conv1d(4, 16, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (convs): ModuleList(
+          (0): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(17, 17, kernel_size=(39,), stride=(1,), padding=(19,), groups=17, bias=False)
+              (pointwise_conv): Conv1d(17, 16, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (1): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(17, 17, kernel_size=(19,), stride=(1,), padding=(9,), groups=17, bias=False)
+              (pointwise_conv): Conv1d(17, 16, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (2): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(17, 17, kernel_size=(9,), stride=(1,), padding=(4,), groups=17, bias=False)
+              (pointwise_conv): Conv1d(17, 16, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+        )
+        (mp_conv): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(4, 16, kernel_size=(1,), stride=(1,), bias=False)
+          )
+        )
+        (concat): Concat(dim=1)
+      )
+      (1): XceptionModulePlus(
+        (bottleneck): ConvBlock(
+          (0): AddCoords1d()
+          (1): Conv1d(65, 32, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (convs): ModuleList(
+          (0): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)
+              (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (1): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)
+              (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (2): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)
+              (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+        )
+        (mp_conv): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(65, 32, kernel_size=(1,), stride=(1,), bias=False)
+          )
+        )
+        (concat): Concat(dim=1)
+      )
+      (2): XceptionModulePlus(
+        (bottleneck): ConvBlock(
+          (0): AddCoords1d()
+          (1): Conv1d(129, 64, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (convs): ModuleList(
+          (0): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(65, 65, kernel_size=(39,), stride=(1,), padding=(19,), groups=65, bias=False)
+              (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (1): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(65, 65, kernel_size=(19,), stride=(1,), padding=(9,), groups=65, bias=False)
+              (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (2): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(65, 65, kernel_size=(9,), stride=(1,), padding=(4,), groups=65, bias=False)
+              (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+        )
+        (mp_conv): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(129, 64, kernel_size=(1,), stride=(1,), bias=False)
+          )
+        )
+        (concat): Concat(dim=1)
+      )
+      (3): XceptionModulePlus(
+        (bottleneck): ConvBlock(
+          (0): AddCoords1d()
+          (1): Conv1d(257, 128, kernel_size=(1,), stride=(1,), bias=False)
+        )
+        (convs): ModuleList(
+          (0): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(129, 129, kernel_size=(39,), stride=(1,), padding=(19,), groups=129, bias=False)
+              (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (1): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(129, 129, kernel_size=(19,), stride=(1,), padding=(9,), groups=129, bias=False)
+              (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+          (2): ConvBlock(
+            (0): AddCoords1d()
+            (1): SeparableConv1d(
+              (depthwise_conv): Conv1d(129, 129, kernel_size=(9,), stride=(1,), padding=(4,), groups=129, bias=False)
+              (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)
+            )
+          )
+        )
+        (mp_conv): Sequential(
+          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
+          (1): ConvBlock(
+            (0): AddCoords1d()
+            (1): Conv1d(257, 128, kernel_size=(1,), stride=(1,), bias=False)
+          )
+        )
+        (concat): Concat(dim=1)
+      )
+    )
+    (shortcut): ModuleList(
+      (0): ConvBlock(
+        (0): AddCoords1d()
+        (1): Conv1d(4, 128, kernel_size=(1,), stride=(1,), bias=False)
+        (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+      (1): ConvBlock(
+        (0): AddCoords1d()
+        (1): Conv1d(129, 512, kernel_size=(1,), stride=(1,), bias=False)
+        (2): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      )
+    )
+    (act): ModuleList(
+      (0): ReLU()
+      (1): ReLU()
+    )
+    (add): Add
+  )
+  (head): Sequential(
+    (0): AdaptiveAvgPool1d(output_size=50)
+    (1): ConvBlock(
+      (0): AddCoords1d()
+      (1): Conv1d(513, 256, kernel_size=(1,), stride=(1,), bias=False)
+      (2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (3): ReLU()
+    )
+    (2): ConvBlock(
+      (0): AddCoords1d()
+      (1): Conv1d(257, 128, kernel_size=(1,), stride=(1,), bias=False)
+      (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (3): ReLU()
+    )
+    (3): ConvBlock(
+      (0): AddCoords1d()
+      (1): Conv1d(129, 2, kernel_size=(1,), stride=(1,), bias=False)
+      (2): BatchNorm1d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (3): ReLU()
+    )
+    (4): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Reshape(bs)
+    )
+  )
+)
+
+
\ No newline at end of file
diff --git a/models.xcm.html b/models.xcm.html
new file mode 100644
index 000000000..3a448a061
--- /dev/null
+++ b/models.xcm.html
@@ -0,0 +1,1408 @@
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

XCM

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

An Explainable Convolutional Neural Network for Multivariate Time Series Classification

+
+

This is an unofficial PyTorch implementation of XCM created by Ignacio Oguiza (oguiza@timeseriesAI.co)

+
+

source

+
+

XCM

+
+
 XCM (c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128,
+      window_perc:float=1.0, flatten:bool=False, custom_head:<built-
+      infunctioncallable>=None, concat_pool:bool=False,
+      fc_dropout:float=0.0, bn:bool=False, y_range:tuple=None, **kwargs)
+
+

Same as nn.Module, but no need for subclasses to call super().__init__

+
+
from tsai.data.basics import *
+from tsai.learner import *
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+tfms = [None, TSCategorize()]
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
+model =  XCM(dls.vars, dls.c, dls.len)
+learn = ts_learner(dls, model, metrics=accuracy)
+xb, yb = dls.one_batch()
+
+bs, c_in, seq_len = xb.shape
+c_out = len(np.unique(yb.cpu().numpy()))
+
+model = XCM(c_in, c_out, seq_len, fc_dropout=.5)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model = XCM(c_in, c_out, seq_len, concat_pool=True)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model = XCM(c_in, c_out, seq_len)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model
+
+
XCM(
+  (conv2dblock): Sequential(
+    (0): Unsqueeze(dim=1)
+    (1): Conv2dSame(
+      (conv2d_same): Conv2d(1, 128, kernel_size=(1, 51), stride=(1, 1))
+    )
+    (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (3): ReLU()
+  )
+  (conv2d1x1block): Sequential(
+    (0): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))
+    (1): ReLU()
+    (2): Squeeze(dim=1)
+  )
+  (conv1dblock): Sequential(
+    (0): Conv1d(24, 128, kernel_size=(51,), stride=(1,), padding=(25,))
+    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (conv1d1x1block): Sequential(
+    (0): Conv1d(128, 1, kernel_size=(1,), stride=(1,))
+    (1): ReLU()
+  )
+  (concat): Concat(dim=1)
+  (conv1d): Sequential(
+    (0): Conv1d(25, 128, kernel_size=(51,), stride=(1,), padding=(25,))
+    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+    (2): ReLU()
+  )
+  (head): Sequential(
+    (0): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Reshape(bs)
+    )
+    (1): LinBnDrop(
+      (0): Linear(in_features=128, out_features=6, bias=True)
+    )
+  )
+)
+
+
+
+
model.show_gradcam(xb, yb)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
model.show_gradcam(xb[0], yb[0])
+
+
[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+c_out = 10
+xb = torch.rand(bs, n_vars, seq_len)
+new_head = partial(conv_lin_nd_head, d=(5, 2))
+net = XCM(n_vars, c_out, seq_len, custom_head=new_head)
+print(net.to(xb.device)(xb).shape)
+net.head
+
+
torch.Size([16, 5, 2, 10])
+
+
+
create_conv_lin_nd_head(
+  (0): Conv1d(128, 10, kernel_size=(1,), stride=(1,))
+  (1): Linear(in_features=12, out_features=10, bias=True)
+  (2): Transpose(-1, -2)
+  (3): Reshape(bs, 5, 2, 10)
+)
+
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+net = XCM(n_vars, c_out, seq_len)
+change_model_head(net, create_pool_plus_head, concat_pool=False)
+print(net.to(xb.device)(xb).shape)
+net.head
+
+
torch.Size([16, 2])
+
+
+
Sequential(
+  (0): AdaptiveAvgPool1d(output_size=1)
+  (1): Reshape(bs)
+  (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (3): Linear(in_features=128, out_features=512, bias=False)
+  (4): ReLU(inplace=True)
+  (5): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (6): Linear(in_features=512, out_features=2, bias=False)
+)
+
+
\ No newline at end of file
diff --git a/models.xcmplus.html b/models.xcmplus.html
new file mode 100644
index 000000000..f502df531
--- /dev/null
+++ b/models.xcmplus.html
@@ -0,0 +1,1433 @@
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

XCMPlus

+
+ + + +
+ + + + +
+ + + +
+ + + +

This is an unofficial PyTorch implementation of XCM created by Ignacio Oguiza (oguiza@timeseriesAI.co).

+
+

source

+
+

XCMPlus

+
+
 XCMPlus (c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128,
+          window_perc:float=1.0, flatten:bool=False, custom_head:<built-
+          infunctioncallable>=None, concat_pool:bool=False,
+          fc_dropout:float=0.0, bn:bool=False, y_range:tuple=None,
+          **kwargs)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+
from tsai.data.basics import *
+from tsai.learner import *
+
+
+
dsid = 'NATOPS'
+X, y, splits = get_UCR_data(dsid, split_data=False)
+tfms = [None, TSCategorize()]
+dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
+model =  XCMPlus(dls.vars, dls.c, dls.len)
+learn = ts_learner(dls, model, metrics=accuracy)
+xb, yb = dls.one_batch()
+
+bs, c_in, seq_len = xb.shape
+c_out = len(np.unique(yb.cpu().numpy()))
+
+model = XCMPlus(c_in, c_out, seq_len, fc_dropout=.5)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model = XCMPlus(c_in, c_out, seq_len, concat_pool=True)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model = XCMPlus(c_in, c_out, seq_len)
+test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
+model
+
+
XCMPlus(
+  (backbone): _XCMPlus_Backbone(
+    (conv2dblock): Sequential(
+      (0): Unsqueeze(dim=1)
+      (1): Conv2dSame(
+        (conv2d_same): Conv2d(1, 128, kernel_size=(1, 51), stride=(1, 1))
+      )
+      (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (3): ReLU()
+    )
+    (conv2d1x1block): Sequential(
+      (0): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))
+      (1): ReLU()
+      (2): Squeeze(dim=1)
+    )
+    (conv1dblock): Sequential(
+      (0): Conv1d(24, 128, kernel_size=(51,), stride=(1,), padding=(25,))
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+    (conv1d1x1block): Sequential(
+      (0): Conv1d(128, 1, kernel_size=(1,), stride=(1,))
+      (1): ReLU()
+    )
+    (concat): Concat(dim=1)
+    (conv1d): Sequential(
+      (0): Conv1d(25, 128, kernel_size=(51,), stride=(1,), padding=(25,))
+      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+      (2): ReLU()
+    )
+  )
+  (head): Sequential(
+    (0): GAP1d(
+      (gap): AdaptiveAvgPool1d(output_size=1)
+      (flatten): Reshape(bs)
+    )
+    (1): LinBnDrop(
+      (0): Linear(in_features=128, out_features=6, bias=True)
+    )
+  )
+)
+
+
+
+
model.show_gradcam(xb, yb)
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
model.show_gradcam(xb[0], yb[0])
+
+
[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+c_out = 1
+xb = torch.rand(bs, n_vars, seq_len)
+new_head = partial(conv_lin_nd_head, d=(5, 2))
+net = XCMPlus(n_vars, c_out, seq_len, custom_head=new_head)
+print(net.to(xb.device)(xb).shape)
+net.head
+
+
torch.Size([16, 5, 2])
+
+
+
create_conv_lin_nd_head(
+  (0): Conv1d(128, 1, kernel_size=(1,), stride=(1,))
+  (1): Linear(in_features=12, out_features=10, bias=True)
+  (2): Transpose(-1, -2)
+  (3): Reshape(bs, 5, 2)
+)
+
+
+
+
bs = 16
+n_vars = 3
+seq_len = 12
+c_out = 2
+xb = torch.rand(bs, n_vars, seq_len)
+net = XCMPlus(n_vars, c_out, seq_len)
+change_model_head(net, create_pool_plus_head, concat_pool=False)
+print(net.to(xb.device)(xb).shape)
+net.head
+
+
torch.Size([16, 2])
+
+
+
Sequential(
+  (0): AdaptiveAvgPool1d(output_size=1)
+  (1): Reshape(bs)
+  (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (3): Linear(in_features=128, out_features=512, bias=False)
+  (4): ReLU(inplace=True)
+  (5): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+  (6): Linear(in_features=512, out_features=2, bias=False)
+)
+
+
\ No newline at end of file
diff --git a/models.xresnet1d.html b/models.xresnet1d.html new file mode 100644 index 000000000..e3895ee5c --- /dev/null +++ b/models.xresnet1d.html @@ -0,0 +1,3523 @@
+tsai - XResNet1d
+
+
+

XResNet1d

+
+

This is a modified version of fastai’s XResNet model (available on GitHub), adapted to 1d (time series) data.

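All the constructors on this page share the same usage pattern, sketched below (a minimal example, assuming xresnet1d18 is importable from tsai.models.XResNet1d; the full test loop appears further down the page):

from tsai.models.XResNet1d import xresnet1d18
import torch

bs, c_in, seq_len, c_out = 2, 4, 32, 2
xb = torch.rand(bs, c_in, seq_len)      # a batch of multivariate time series
model = xresnet1d18(c_in, c_out)        # any other constructor below works the same way
assert model(xb).shape == (bs, c_out)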
+
+
+

source

+
+

xresnet1d50_deeper

+
+
 xresnet1d50_deeper (c_in, c_out, act=<class
+                     'torch.nn.modules.activation.ReLU'>, stride=1,
+                     groups=1, reduction=None, nh1=None, nh2=None,
+                     dw=False, g2=1, sa=False, sym=False,
+                     norm_type=<NormType.Batch: 1>, act_cls=<class
+                     'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+                     pool=<function AvgPool>, pool_first=True,
+                     padding=None, bias=None, bn_1st=True,
+                     transpose=False, init='auto', xtra=None,
+                     bias_std=0.01, dilation:Union[int,Tuple[int,int]]=1,
+                     padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d34_deeper

+
+
 xresnet1d34_deeper (c_in, c_out, act=<class
+                     'torch.nn.modules.activation.ReLU'>, stride=1,
+                     groups=1, reduction=None, nh1=None, nh2=None,
+                     dw=False, g2=1, sa=False, sym=False,
+                     norm_type=<NormType.Batch: 1>, act_cls=<class
+                     'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+                     pool=<function AvgPool>, pool_first=True,
+                     padding=None, bias=None, bn_1st=True,
+                     transpose=False, init='auto', xtra=None,
+                     bias_std=0.01, dilation:Union[int,Tuple[int,int]]=1,
+                     padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d18_deeper

+
+
 xresnet1d18_deeper (c_in, c_out, act=<class
+                     'torch.nn.modules.activation.ReLU'>, stride=1,
+                     groups=1, reduction=None, nh1=None, nh2=None,
+                     dw=False, g2=1, sa=False, sym=False,
+                     norm_type=<NormType.Batch: 1>, act_cls=<class
+                     'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+                     pool=<function AvgPool>, pool_first=True,
+                     padding=None, bias=None, bn_1st=True,
+                     transpose=False, init='auto', xtra=None,
+                     bias_std=0.01, dilation:Union[int,Tuple[int,int]]=1,
+                     padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d50_deep

+
+
 xresnet1d50_deep (c_in, c_out, act=<class
+                   'torch.nn.modules.activation.ReLU'>, stride=1,
+                   groups=1, reduction=None, nh1=None, nh2=None, dw=False,
+                   g2=1, sa=False, sym=False, norm_type=<NormType.Batch:
+                   1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,
+                   ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,
+                   padding=None, bias=None, bn_1st=True, transpose=False,
+                   init='auto', xtra=None, bias_std=0.01,
+                   dilation:Union[int,Tuple[int,int]]=1,
+                   padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d34_deep

+
+
 xresnet1d34_deep (c_in, c_out, act=<class
+                   'torch.nn.modules.activation.ReLU'>, stride=1,
+                   groups=1, reduction=None, nh1=None, nh2=None, dw=False,
+                   g2=1, sa=False, sym=False, norm_type=<NormType.Batch:
+                   1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,
+                   ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,
+                   padding=None, bias=None, bn_1st=True, transpose=False,
+                   init='auto', xtra=None, bias_std=0.01,
+                   dilation:Union[int,Tuple[int,int]]=1,
+                   padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d18_deep

+
+
 xresnet1d18_deep (c_in, c_out, act=<class
+                   'torch.nn.modules.activation.ReLU'>, stride=1,
+                   groups=1, reduction=None, nh1=None, nh2=None, dw=False,
+                   g2=1, sa=False, sym=False, norm_type=<NormType.Batch:
+                   1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,
+                   ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,
+                   padding=None, bias=None, bn_1st=True, transpose=False,
+                   init='auto', xtra=None, bias_std=0.01,
+                   dilation:Union[int,Tuple[int,int]]=1,
+                   padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d152

+
+
 xresnet1d152 (c_in, c_out, act=<class
+               'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,
+               reduction=None, nh1=None, nh2=None, dw=False, g2=1,
+               sa=False, sym=False, norm_type=<NormType.Batch: 1>,
+               act_cls=<class 'torch.nn.modules.activation.ReLU'>, ndim=2,
+               ks=3, pool=<function AvgPool>, pool_first=True,
+               padding=None, bias=None, bn_1st=True, transpose=False,
+               init='auto', xtra=None, bias_std=0.01,
+               dilation:Union[int,Tuple[int,int]]=1,
+               padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d101

+
+
 xresnet1d101 (c_in, c_out, act=<class
+               'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,
+               reduction=None, nh1=None, nh2=None, dw=False, g2=1,
+               sa=False, sym=False, norm_type=<NormType.Batch: 1>,
+               act_cls=<class 'torch.nn.modules.activation.ReLU'>, ndim=2,
+               ks=3, pool=<function AvgPool>, pool_first=True,
+               padding=None, bias=None, bn_1st=True, transpose=False,
+               init='auto', xtra=None, bias_std=0.01,
+               dilation:Union[int,Tuple[int,int]]=1,
+               padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d50

+
+
 xresnet1d50 (c_in, c_out, act=<class 'torch.nn.modules.activation.ReLU'>,
+              stride=1, groups=1, reduction=None, nh1=None, nh2=None,
+              dw=False, g2=1, sa=False, sym=False,
+              norm_type=<NormType.Batch: 1>, act_cls=<class
+              'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+              pool=<function AvgPool>, pool_first=True, padding=None,
+              bias=None, bn_1st=True, transpose=False, init='auto',
+              xtra=None, bias_std=0.01,
+              dilation:Union[int,Tuple[int,int]]=1,
+              padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d34

+
+
 xresnet1d34 (c_in, c_out, act=<class 'torch.nn.modules.activation.ReLU'>,
+              stride=1, groups=1, reduction=None, nh1=None, nh2=None,
+              dw=False, g2=1, sa=False, sym=False,
+              norm_type=<NormType.Batch: 1>, act_cls=<class
+              'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+              pool=<function AvgPool>, pool_first=True, padding=None,
+              bias=None, bn_1st=True, transpose=False, init='auto',
+              xtra=None, bias_std=0.01,
+              dilation:Union[int,Tuple[int,int]]=1,
+              padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d18

+
+
 xresnet1d18 (c_in, c_out, act=<class 'torch.nn.modules.activation.ReLU'>,
+              stride=1, groups=1, reduction=None, nh1=None, nh2=None,
+              dw=False, g2=1, sa=False, sym=False,
+              norm_type=<NormType.Batch: 1>, act_cls=<class
+              'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+              pool=<function AvgPool>, pool_first=True, padding=None,
+              bias=None, bn_1st=True, transpose=False, init='auto',
+              xtra=None, bias_std=0.01,
+              dilation:Union[int,Tuple[int,int]]=1,
+              padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+
bs, c_in, seq_len = 2, 4, 32
+c_out = 2
+x = torch.rand(bs, c_in, seq_len)
+archs = [
+    xresnet1d18, xresnet1d34, xresnet1d50, 
+    xresnet1d18_deep, xresnet1d34_deep, xresnet1d50_deep, xresnet1d18_deeper,
+    xresnet1d34_deeper, xresnet1d50_deeper
+#     # Long test
+#     xresnet1d101, xresnet1d152,
+]
+for i, arch in enumerate(archs):
+    print(i, arch.__name__)
+    test_eq(arch(c_in, c_out, sa=True, act=Mish)(x).shape, (bs, c_out))
+
+
0 xresnet1d18
+1 xresnet1d34
+2 xresnet1d50
+3 xresnet1d18_deep
+4 xresnet1d34_deep
+5 xresnet1d50_deep
+6 xresnet1d18_deeper
+7 xresnet1d34_deeper
+8 xresnet1d50_deeper
+
+
+
+
m = xresnet1d34(4, 2, act=Mish)
+test_eq(len(get_layers(m, is_bn)), 38)
+test_eq(check_weight(m, is_bn)[0].sum(), 22)
+
\ No newline at end of file
diff --git a/models.xresnet1dplus.html b/models.xresnet1dplus.html new file mode 100644 index 000000000..95e698291 --- /dev/null +++ b/models.xresnet1dplus.html @@ -0,0 +1,3698 @@
+tsai - XResNet1dPlus
+
+
+

XResNet1dPlus

+
+

This is a modified version of fastai’s XResNet model (available on GitHub), adapted to 1d (time series) data.

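The Plus variants follow the same (bs, c_in, seq_len) -> (bs, c_out) pattern as XResNet1d, and additionally accept arguments such as seq_len, coord and custom_head. A minimal sketch (assuming xresnet1d18plus is importable from tsai.models.XResNet1dPlus; a fuller example appears below):

from tsai.models.XResNet1dPlus import xresnet1d18plus
import torch

bs, c_in, seq_len, c_out = 2, 3, 50, 2
xb = torch.rand(bs, c_in, seq_len)
model = xresnet1d18plus(c_in, c_out, seq_len=seq_len, coord=True)   # same keyword arguments as in the example further down
assert model(xb).shape == (bs, c_out)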
+
+
+

source

+
+

XResNet1dPlus

+
+
 XResNet1dPlus (block=<class 'tsai.models.layers.ResBlock1dPlus'>,
+                expansion=4, layers=[3, 4, 6, 3], fc_dropout=0.0, c_in=3,
+                c_out=None, n_out=1000, seq_len=None, stem_szs=(32, 32,
+                64), widen=1.0, sa=False, act_cls=<class
+                'torch.nn.modules.activation.ReLU'>, ks=3, stride=2,
+                coord=False, custom_head=None, block_szs_base=(64, 128,
+                256, 512), groups=1, reduction=None, nh1=None, nh2=None,
+                dw=False, g2=1, sym=False, norm='Batch', zero_norm=True,
+                pool=<function AvgPool>, pool_first=True)
+
+

A sequential container.

+

Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

+

The value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).

+

What’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a torch.nn.ModuleList? A ModuleList is exactly what it sounds like: a list for storing Modules. On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.

+

Example::

+
# Using Sequential to create a small model. When `model` is run,
+# input will first be passed to `Conv2d(1,20,5)`. The output of
+# `Conv2d(1,20,5)` will be used as the input to the first
+# `ReLU`; the output of the first `ReLU` will become the input
+# for `Conv2d(20,64,5)`. Finally, the output of
+# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
+model = nn.Sequential(
+          nn.Conv2d(1,20,5),
+          nn.ReLU(),
+          nn.Conv2d(20,64,5),
+          nn.ReLU()
+        )
+
+# Using Sequential with OrderedDict. This is functionally the
+# same as the above code
+model = nn.Sequential(OrderedDict([
+          ('conv1', nn.Conv2d(1,20,5)),
+          ('relu1', nn.ReLU()),
+          ('conv2', nn.Conv2d(20,64,5)),
+          ('relu2', nn.ReLU())
+        ]))
+
+

source

+
+
+

xresnet1d50_deeperplus

+
+
 xresnet1d50_deeperplus (c_in, c_out, seq_len=None, act=<class
+                         'torch.nn.modules.activation.ReLU'>, stride=1,
+                         groups=1, reduction=None, nh1=None, nh2=None,
+                         dw=False, g2=1, sa=False, sym=False,
+                         norm_type=<NormType.Batch: 1>, act_cls=<class
+                         'torch.nn.modules.activation.ReLU'>, ndim=2,
+                         ks=3, pool=<function AvgPool>, pool_first=True,
+                         padding=None, bias=None, bn_1st=True,
+                         transpose=False, init='auto', xtra=None,
+                         bias_std=0.01,
+                         dilation:Union[int,Tuple[int,int]]=1,
+                         padding_mode:str='zeros', device=None,
+                         dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d34_deeperplus

+
+
 xresnet1d34_deeperplus (c_in, c_out, seq_len=None, act=<class
+                         'torch.nn.modules.activation.ReLU'>, stride=1,
+                         groups=1, reduction=None, nh1=None, nh2=None,
+                         dw=False, g2=1, sa=False, sym=False,
+                         norm_type=<NormType.Batch: 1>, act_cls=<class
+                         'torch.nn.modules.activation.ReLU'>, ndim=2,
+                         ks=3, pool=<function AvgPool>, pool_first=True,
+                         padding=None, bias=None, bn_1st=True,
+                         transpose=False, init='auto', xtra=None,
+                         bias_std=0.01,
+                         dilation:Union[int,Tuple[int,int]]=1,
+                         padding_mode:str='zeros', device=None,
+                         dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d18_deeperplus

+
+
 xresnet1d18_deeperplus (c_in, c_out, seq_len=None, act=<class
+                         'torch.nn.modules.activation.ReLU'>, stride=1,
+                         groups=1, reduction=None, nh1=None, nh2=None,
+                         dw=False, g2=1, sa=False, sym=False,
+                         norm_type=<NormType.Batch: 1>, act_cls=<class
+                         'torch.nn.modules.activation.ReLU'>, ndim=2,
+                         ks=3, pool=<function AvgPool>, pool_first=True,
+                         padding=None, bias=None, bn_1st=True,
+                         transpose=False, init='auto', xtra=None,
+                         bias_std=0.01,
+                         dilation:Union[int,Tuple[int,int]]=1,
+                         padding_mode:str='zeros', device=None,
+                         dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d50_deepplus

+
+
 xresnet1d50_deepplus (c_in, c_out, seq_len=None, act=<class
+                       'torch.nn.modules.activation.ReLU'>, stride=1,
+                       groups=1, reduction=None, nh1=None, nh2=None,
+                       dw=False, g2=1, sa=False, sym=False,
+                       norm_type=<NormType.Batch: 1>, act_cls=<class
+                       'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+                       pool=<function AvgPool>, pool_first=True,
+                       padding=None, bias=None, bn_1st=True,
+                       transpose=False, init='auto', xtra=None,
+                       bias_std=0.01,
+                       dilation:Union[int,Tuple[int,int]]=1,
+                       padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d34_deepplus

+
+
 xresnet1d34_deepplus (c_in, c_out, seq_len=None, act=<class
+                       'torch.nn.modules.activation.ReLU'>, stride=1,
+                       groups=1, reduction=None, nh1=None, nh2=None,
+                       dw=False, g2=1, sa=False, sym=False,
+                       norm_type=<NormType.Batch: 1>, act_cls=<class
+                       'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+                       pool=<function AvgPool>, pool_first=True,
+                       padding=None, bias=None, bn_1st=True,
+                       transpose=False, init='auto', xtra=None,
+                       bias_std=0.01,
+                       dilation:Union[int,Tuple[int,int]]=1,
+                       padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d18_deepplus

+
+
 xresnet1d18_deepplus (c_in, c_out, seq_len=None, act=<class
+                       'torch.nn.modules.activation.ReLU'>, stride=1,
+                       groups=1, reduction=None, nh1=None, nh2=None,
+                       dw=False, g2=1, sa=False, sym=False,
+                       norm_type=<NormType.Batch: 1>, act_cls=<class
+                       'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,
+                       pool=<function AvgPool>, pool_first=True,
+                       padding=None, bias=None, bn_1st=True,
+                       transpose=False, init='auto', xtra=None,
+                       bias_std=0.01,
+                       dilation:Union[int,Tuple[int,int]]=1,
+                       padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d152plus

+
+
 xresnet1d152plus (c_in, c_out, seq_len=None, act=<class
+                   'torch.nn.modules.activation.ReLU'>, stride=1,
+                   groups=1, reduction=None, nh1=None, nh2=None, dw=False,
+                   g2=1, sa=False, sym=False, norm_type=<NormType.Batch:
+                   1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,
+                   ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,
+                   padding=None, bias=None, bn_1st=True, transpose=False,
+                   init='auto', xtra=None, bias_std=0.01,
+                   dilation:Union[int,Tuple[int,int]]=1,
+                   padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d101plus

+
+
 xresnet1d101plus (c_in, c_out, seq_len=None, act=<class
+                   'torch.nn.modules.activation.ReLU'>, stride=1,
+                   groups=1, reduction=None, nh1=None, nh2=None, dw=False,
+                   g2=1, sa=False, sym=False, norm_type=<NormType.Batch:
+                   1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,
+                   ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,
+                   padding=None, bias=None, bn_1st=True, transpose=False,
+                   init='auto', xtra=None, bias_std=0.01,
+                   dilation:Union[int,Tuple[int,int]]=1,
+                   padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d50plus

+
+
 xresnet1d50plus (c_in, c_out, seq_len=None, act=<class
+                  'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,
+                  reduction=None, nh1=None, nh2=None, dw=False, g2=1,
+                  sa=False, sym=False, norm_type=<NormType.Batch: 1>,
+                  act_cls=<class 'torch.nn.modules.activation.ReLU'>,
+                  ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,
+                  padding=None, bias=None, bn_1st=True, transpose=False,
+                  init='auto', xtra=None, bias_std=0.01,
+                  dilation:Union[int,Tuple[int,int]]=1,
+                  padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d34plus

+
+
 xresnet1d34plus (c_in, c_out, seq_len=None, act=<class
+                  'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,
+                  reduction=None, nh1=None, nh2=None, dw=False, g2=1,
+                  sa=False, sym=False, norm_type=<NormType.Batch: 1>,
+                  act_cls=<class 'torch.nn.modules.activation.ReLU'>,
+                  ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,
+                  padding=None, bias=None, bn_1st=True, transpose=False,
+                  init='auto', xtra=None, bias_std=0.01,
+                  dilation:Union[int,Tuple[int,int]]=1,
+                  padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+

source

+
+
+

xresnet1d18plus

+
+
 xresnet1d18plus (c_in, c_out, seq_len=None, act=<class
+                  'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,
+                  reduction=None, nh1=None, nh2=None, dw=False, g2=1,
+                  sa=False, sym=False, norm_type=<NormType.Batch: 1>,
+                  act_cls=<class 'torch.nn.modules.activation.ReLU'>,
+                  ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,
+                  padding=None, bias=None, bn_1st=True, transpose=False,
+                  init='auto', xtra=None, bias_std=0.01,
+                  dilation:Union[int,Tuple[int,int]]=1,
+                  padding_mode:str='zeros', device=None, dtype=None)
+
              Type        Default         Details
c_in
c_out
seq_len       NoneType    None
act           type        ReLU
stride        int         1
groups        int         1
reduction     NoneType    None
nh1           NoneType    None
nh2           NoneType    None
dw            bool        False
g2            int         1
sa            bool        False
sym           bool        False
norm_type     NormType    NormType.Batch
act_cls       type        ReLU
ndim          int         2
ks            int         3
pool          function    AvgPool
pool_first    bool        True
padding       NoneType    None
bias          NoneType    None
bn_1st        bool        True
transpose     bool        False
init          str         auto
xtra          NoneType    None
bias_std      float       0.01
dilation      typing.Union[int, typing.Tuple[int, int]]  1
padding_mode  str         zeros           TODO: refine this type
device        NoneType    None
dtype         NoneType    None
+
+
net = xresnet1d18plus(3, 2, coord=True)
+x = torch.rand(32, 3, 50)
+net(x)
+
+
block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [2, 2, 2, 2]
+
+
+
TensorBase([[ 0.1829,  0.3597],
+            [ 0.0274, -0.1443],
+            [ 0.0240, -0.2374],
+            [-0.1323, -0.6574],
+            [ 0.1481, -0.1438],
+            [ 0.2410, -0.1225],
+            [-0.1186, -0.1978],
+            [-0.0640, -0.4547],
+            [-0.0229, -0.3214],
+            [ 0.2336, -0.4466],
+            [-0.1843, -0.0934],
+            [-0.0416,  0.1997],
+            [-0.0109, -0.0253],
+            [ 0.3014, -0.2193],
+            [ 0.0966,  0.0602],
+            [ 0.2364,  0.2209],
+            [-0.1437, -0.1476],
+            [ 0.0070, -0.2900],
+            [ 0.2807,  0.4797],
+            [-0.2386, -0.1563],
+            [ 0.1620, -0.2285],
+            [ 0.0479, -0.2348],
+            [ 0.1573, -0.4420],
+            [-0.5469,  0.1512],
+            [ 0.0243, -0.1806],
+            [ 0.3396,  0.1434],
+            [ 0.0666, -0.1644],
+            [ 0.3286, -0.5637],
+            [ 0.0993, -0.6281],
+            [-0.1068, -0.0763],
+            [-0.2713,  0.1946],
+            [-0.1416, -0.4043]], grad_fn=<AliasBackward0>)
+
+
+
+
bs, c_in, seq_len = 2, 4, 32
+c_out = 2
+x = torch.rand(bs, c_in, seq_len)
+archs = [
+    xresnet1d18plus, xresnet1d34plus, xresnet1d50plus, 
+    xresnet1d18_deepplus, xresnet1d34_deepplus, xresnet1d50_deepplus, xresnet1d18_deeperplus,
+    xresnet1d34_deeperplus, xresnet1d50_deeperplus
+#     # Long test
+#     xresnet1d101, xresnet1d152,
+]
+for i, arch in enumerate(archs):
+    print(i, arch.__name__)
+    test_eq(arch(c_in, c_out, sa=True, act=Mish, coord=True)(x).shape, (bs, c_out))
+
+
0 xresnet1d18plus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [2, 2, 2, 2]
+1 xresnet1d34plus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [3, 4, 6, 3]
+2 xresnet1d50plus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 4 layers [3, 4, 6, 3]
+3 xresnet1d18_deepplus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [2, 2, 2, 2, 1, 1]
+4 xresnet1d34_deepplus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [3, 4, 6, 3, 1, 1]
+5 xresnet1d50_deepplus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 4 layers [3, 4, 6, 3, 1, 1]
+6 xresnet1d18_deeperplus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [2, 2, 1, 1, 1, 1, 1, 1]
+7 xresnet1d34_deeperplus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [3, 4, 6, 3, 1, 1, 1, 1]
+8 xresnet1d50_deeperplus
+block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 4 layers [3, 4, 6, 3, 1, 1, 1, 1]
+
+
+
+
m = xresnet1d34plus(4, 2, act=Mish)
+test_eq(len(get_layers(m, is_bn)), 38)
+test_eq(check_weight(m, is_bn)[0].sum(), 22)
+
+
block <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [3, 4, 6, 3]
+
+
\ No newline at end of file
diff --git a/optimizer.html b/optimizer.html new file mode 100644 index 000000000..53290dca8 --- /dev/null +++ b/optimizer.html @@ -0,0 +1,1265 @@
+tsai - Optimizers
+
+
+

Optimizers

+
+

This contains a set of optimizers.

+
+
+

source

+
+

wrap_optimizer

+
+
 wrap_optimizer (opt, **kwargs)
+
+

You can natively use any of the optimizers included in the fastai library. You just need to pass it to the learner as the opt_func.

+

In addition, you will be able to use any of the optimizers from:

+
  • PyTorch
  • torch_optimizer (https://github.com/jettify/pytorch-optimizer). In this case, you will need to install torch-optimizer first.

Examples of use:

+
adamw = wrap_optimizer(torch.optim.AdamW)
+
import torch_optimizer as optim
+adabelief = wrap_optimizer(optim.AdaBelief)
+

If you want to use an optimizer from either of these last two sources, wrap it with the wrap_optimizer function, as in the examples above; the wrapped optimizer can then be passed to a learner as shown below.

\ No newline at end of file
diff --git a/optuna.html b/optuna.html new file mode 100644 index 000000000..f27452a89 --- /dev/null +++ b/optuna.html @@ -0,0 +1,1368 @@
+tsai - Optuna
+
+
+

Optuna

+
+

A hyperparameter optimization framework

+
+

Optuna is an automatic hyperparameter optimization software framework, particularly designed for machine learning. It features an imperative, define-by-run style user API: code written with Optuna is highly modular, and users can dynamically construct the search spaces for the hyperparameters.

+
+
def run_optuna_study(objective, resume=None, study_type=None, multivariate=True, search_space=None, evaluate=None, seed=None, sampler=None, pruner=None, 
+                     study_name=None, direction='maximize', n_trials=None, timeout=None, gc_after_trial=False, show_progress_bar=True, 
+                     save_study=True, path='optuna', show_plots=True):
+    r"""Creates and runs an optuna study.
+
+    Args: 
+        objective:          A callable that implements objective function.
+        resume:             Path to a previously saved study.
+        study_type:         Type of study selected (bayesian, gridsearch, randomsearch). Based on this a sampler will be built if sampler is None. 
+                            If a sampler is passed, this has no effect.
+        multivariate:       If this is True, the multivariate TPE is used when suggesting parameters. The multivariate TPE is reported to outperform 
+                            the independent TPE.
+        search_space:       Search space required when running a gridsearch (if you don't pass a sampler).
+        evaluate:           Allows you to pass a specific set of hyperparameters that will be evaluated.
+        seed:               Fixed seed used by samplers.
+        sampler:            A sampler object that implements background algorithm for value suggestion. If None is specified, TPESampler is used during 
+                            single-objective optimization and NSGAIISampler during multi-objective optimization. See also samplers.
+        pruner:             A pruner object that decides early stopping of unpromising trials. If None is specified, MedianPruner is used as the default. 
+                            See also pruners.
+        study_name:         Study’s name. If this argument is set to None, a unique name is generated automatically.
+        direction:          A sequence of directions during multi-objective optimization.
+        n_trials:           The number of trials. If this argument is set to None, there is no limitation on the number of trials. If timeout is also set to 
+                            None, the study continues to create trials until it receives a termination signal such as Ctrl+C or SIGTERM.
+        timeout:            Stop study after the given number of second(s). If this argument is set to None, the study is executed without time limitation. 
+                            If n_trials is also set to None, the study continues to create trials until it receives a termination signal such as 
+                            Ctrl+C or SIGTERM.
+        gc_after_trial:     Flag to execute garbage collection at the end of each trial. By default, garbage collection is enabled, just in case. 
+                            You can turn it off with this argument if memory is safely managed in your objective function.
+        show_progress_bar:  Flag to show progress bars or not. To disable progress bar, set this False.
+        save_study:         Save your study when finished/ interrupted.
+        path:               Folder where the study will be saved.
+        show_plots:         Flag to control whether plots are shown at the end of the study.
+    """
+    
+    try: import optuna
+    except ImportError: raise ImportError('You need to install optuna to use run_optuna_study')
+
+    # Sampler
+    if sampler is None:
+        if study_type is None or "bayes" in study_type.lower(): 
+            sampler = optuna.samplers.TPESampler(seed=seed, multivariate=multivariate)
+        elif "grid" in study_type.lower():
+            assert search_space, f"you need to pass a search_space dict to run a gridsearch"
+            sampler = optuna.samplers.GridSampler(search_space)
+        elif "random" in study_type.lower(): 
+            sampler = optuna.samplers.RandomSampler(seed=seed)
+    assert sampler, "you need to either select a study type (bayesian, gridsampler, randomsampler) or pass a sampler"
+
+    # Study
+    if resume: 
+        try:
+            study = joblib.load(resume)
+        except: 
+            print(f"joblib.load({resume}) couldn't recover any saved study. Check the path.")
+            return
+        print("Best trial until now:")
+        print(" Value: ", study.best_trial.value)
+        print(" Params: ")
+        for key, value in study.best_trial.params.items():
+            print(f"    {key}: {value}")
+    else: 
+        study = optuna.create_study(sampler=sampler, pruner=pruner, study_name=study_name, direction=direction)
+    if evaluate: study.enqueue_trial(evaluate)
+    try:
+        study.optimize(objective, n_trials=n_trials, timeout=timeout, gc_after_trial=gc_after_trial, show_progress_bar=show_progress_bar)
+    except KeyboardInterrupt:
+        pass
+
+    # Save
+    if save_study:
+        full_path = Path(path)/f'{study.study_name}.pkl'
+        full_path.parent.mkdir(parents=True, exist_ok=True)
+        joblib.dump(study, full_path)
+        print(f'\nOptuna study saved to {full_path}')
+        print(f"To reload the study run: study = joblib.load('{full_path}')")
+
+    # Plots
+    if show_plots and len(study.trials) > 1:
+        try: display(optuna.visualization.plot_optimization_history(study))
+        except: pass
+        try: display(optuna.visualization.plot_param_importances(study))
+        except: pass
+        try: display(optuna.visualization.plot_slice(study))
+        except: pass
+        try: display(optuna.visualization.plot_parallel_coordinate(study))
+        except: pass
+
+    # Study stats
+    try:
+        pruned_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED]
+        complete_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]
+        print(f"\nStudy statistics    : ")
+        print(f"  Study name        : {study.study_name}")
+        print(f"  # finished trials : {len(study.trials)}")
+        print(f"  # pruned trials   : {len(pruned_trials)}")
+        print(f"  # complete trials : {len(complete_trials)}")
+        
+        print(f"\nBest trial          :")
+        trial = study.best_trial
+        print(f"  value             : {trial.value}")
+        print(f"  best_params = {trial.params}\n")
+    except:
+        print('\nNo finished trials yet.')
+    return study
+
+
+

source

+
+

run_optuna_study

+
+
 run_optuna_study (objective, resume=None, study_type=None,
+                   multivariate=True, search_space=None, evaluate=None,
+                   seed=None, sampler=None, pruner=None, study_name=None,
+                   direction='maximize', n_trials=None, timeout=None,
+                   gc_after_trial=False, show_progress_bar=True,
+                   save_study=True, path='optuna', show_plots=True)
+
+

Creates and runs an optuna study.

+

Args:
  objective:          A callable that implements the objective function.
  resume:             Path to a previously saved study.
  study_type:         Type of study selected (bayesian, gridsearch, randomsearch). Based on this a sampler will be built if sampler is None. If a sampler is passed, this has no effect.
  multivariate:       If this is True, the multivariate TPE is used when suggesting parameters. The multivariate TPE is reported to outperform the independent TPE.
  search_space:       Search space required when running a gridsearch (if you don’t pass a sampler).
  evaluate:           Allows you to pass a specific set of hyperparameters that will be evaluated.
  seed:               Fixed seed used by samplers.
  sampler:            A sampler object that implements background algorithm for value suggestion. If None is specified, TPESampler is used during single-objective optimization and NSGAIISampler during multi-objective optimization. See also samplers.
  pruner:             A pruner object that decides early stopping of unpromising trials. If None is specified, MedianPruner is used as the default. See also pruners.
  study_name:         Study’s name. If this argument is set to None, a unique name is generated automatically.
  direction:          A sequence of directions during multi-objective optimization.
  n_trials:           The number of trials. If this argument is set to None, there is no limitation on the number of trials. If timeout is also set to None, the study continues to create trials until it receives a termination signal such as Ctrl+C or SIGTERM.
  timeout:            Stop study after the given number of second(s). If this argument is set to None, the study is executed without time limitation. If n_trials is also set to None, the study continues to create trials until it receives a termination signal such as Ctrl+C or SIGTERM.
  gc_after_trial:     Flag to execute garbage collection at the end of each trial. By default, garbage collection is enabled, just in case. You can turn it off with this argument if memory is safely managed in your objective function.
  show_progress_bar:  Flag to show progress bars or not. To disable progress bar, set this False.
  save_study:         Save your study when finished/interrupted.
  path:               Folder where the study will be saved.
  show_plots:         Flag to control whether plots are shown at the end of the study.

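As a usage sketch (a toy objective written purely for illustration; in practice the objective would train a tsai learner and return a validation metric to maximize):

import optuna

def objective(trial: optuna.Trial) -> float:
    # toy search space: two hyperparameters whose optimum is known in advance
    lr = trial.suggest_float('lr', 1e-4, 1e-1, log=True)
    dropout = trial.suggest_float('dropout', 0., .5)
    return -(lr - 1e-2) ** 2 - (dropout - .1) ** 2    # maximized near lr=1e-2, dropout=.1

study = run_optuna_study(objective, study_type='bayesian', n_trials=20,
                         direction='maximize', save_study=False, show_plots=False)
print(study.best_trial.params)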
+ + + + + \ No newline at end of file diff --git a/robots.txt b/robots.txt new file mode 100644 index 000000000..c98c88a24 --- /dev/null +++ b/robots.txt @@ -0,0 +1 @@ +Sitemap: https://timeseriesAI.github.io/tsai/sitemap.xml diff --git a/search.json b/search.json new file mode 100644 index 000000000..da5aed71f --- /dev/null +++ b/search.json @@ -0,0 +1,1504 @@ +[ + { + "objectID": "models.tstplus.html", + "href": "models.tstplus.html", + "title": "TSTPlus", + "section": "", + "text": "This is an unofficial PyTorch implementation by Ignacio Oguiza of - oguiza@timeseriesAI.co based on:\nThis implementation is adapted to work with the rest of the tsai library, and contain some hyperparameters that are not available in the original implementation. I included them for experimenting.", + "crumbs": [ + "Models", + "Transformers", + "TSTPlus" + ] + }, + { + "objectID": "models.tstplus.html#imports", + "href": "models.tstplus.html#imports", + "title": "TSTPlus", + "section": "Imports", + "text": "Imports", + "crumbs": [ + "Models", + "Transformers", + "TSTPlus" + ] + }, + { + "objectID": "models.tstplus.html#tst", + "href": "models.tstplus.html#tst", + "title": "TSTPlus", + "section": "TST", + "text": "TST\n\nt = torch.rand(16, 50, 128)\nattn_mask = torch.triu(torch.ones(50, 50)) # shape: q_len x q_len\nkey_padding_mask = torch.zeros(16, 50)\nkey_padding_mask[[1, 3, 6, 15], -10:] = 1\nkey_padding_mask = key_padding_mask.bool()\nprint('attn_mask', attn_mask.shape, 'key_padding_mask', key_padding_mask.shape)\nencoder = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=8, d_k=None, d_v=None, d_ff=512, attn_dropout=0., dropout=0.1, store_attn=True, activation='gelu')\noutput = encoder(t, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\noutput.shape\n\nattn_mask torch.Size([50, 50]) key_padding_mask torch.Size([16, 50])\n\n\ntorch.Size([16, 50, 128])\n\n\n\ncmap='viridis'\nfigsize=(6,5)\nplt.figure(figsize=figsize)\nplt.pcolormesh(encoder.attn[0][0].detach().cpu().numpy(), cmap=cmap)\nplt.title('Self-attention map')\nplt.colorbar()\nplt.show()\n\n\n\n\n\n\n\n\n\nsource\n\nTSTPlus\n\n TSTPlus (c_in:int, c_out:int, seq_len:int, max_seq_len:Optional[int]=512,\n n_layers:int=3, d_model:int=128, n_heads:int=16,\n d_k:Optional[int]=None, d_v:Optional[int]=None, d_ff:int=256,\n norm:str='BatchNorm', attn_dropout:float=0.0, dropout:float=0.0,\n act:str='gelu', key_padding_mask:bool='auto',\n padding_var:Optional[int]=None,\n attn_mask:Optional[torch.Tensor]=None, res_attention:bool=True,\n pre_norm:bool=False, store_attn:bool=False, pe:str='zeros',\n learn_pe:bool=True, flatten:bool=True, fc_dropout:float=0.0,\n concat_pool:bool=False, bn:bool=False,\n custom_head:Optional[Callable]=None,\n y_range:Optional[tuple]=None, verbose:bool=False, **kwargs)\n\nTST (Time Series Transformer) is a Transformer that takes continuous time series as inputs\n\nfrom tsai.models.utils import build_ts_model\n\n\nbs = 8\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 1_500\n\nxb = torch.randn(bs, c_in, seq_len).to(device)\n\n# standardize by channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\n# Settings\nmax_seq_len = 256\nd_model = 128\nn_heads = 16\nd_k = d_v = None # if None --> d_model // n_heads\nd_ff = 256\nnorm = \"BatchNorm\"\ndropout = 0.1\nactivation = \"gelu\"\nn_layers = 3\nfc_dropout = 0.1\npe = None\nlearn_pe = True\nkwargs = {}\n\nmodel = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, 
n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm, dropout=dropout, activation=activation, n_layers=n_layers,\n fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs).to(device)\ntest_eq(model(xb).shape, [bs, c_out])\ntest_eq(model[0], model.backbone)\ntest_eq(model[1], model.head)\nmodel2 = build_ts_model(TSTPlus, c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, norm=norm, dropout=dropout, activation=activation, n_layers=n_layers,\n fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs).to(device)\ntest_eq(model2(xb).shape, [bs, c_out])\ntest_eq(model2[0], model2.backbone)\ntest_eq(model2[1], model2.head)\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 470018\n\n\n\nkey_padding_mask = torch.sort(torch.randint(0, 2, (bs, max_seq_len))).values.bool().to(device)\nkey_padding_mask[0]\n\ntensor([False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True])\n\n\n\nmodel2.key_padding_mask = True\nmodel2.to(device)((xb, key_padding_mask)).shape\n\ntorch.Size([8, 2])\n\n\n\nmodel.head\n\nSequential(\n (0): GELU(approximate='none')\n (1): fastai.layers.Flatten(full=False)\n (2): LinBnDrop(\n (0): Dropout(p=0.1, inplace=False)\n (1): Linear(in_features=32768, out_features=2, bias=True)\n )\n)\n\n\n\nmodel = TSTPlus(c_in, c_out, seq_len, pre_norm=True)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\n\n\nbs = 8\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 5000\n\nxb = torch.randn(bs, c_in, seq_len)\n\n# standardize by channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\nmodel = TSTPlus(c_in, c_out, seq_len, res_attention=True)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 
605698\n\n\n\ncustom_head = partial(create_pool_head, concat_pool=True)\nmodel = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, activation=activation, n_layers=n_layers,\n fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, flatten=False, custom_head=custom_head, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 421122\n\n\n\ncustom_head = partial(create_pool_plus_head, concat_pool=True)\nmodel = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, activation=activation, n_layers=n_layers,\n fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, flatten=False, custom_head=custom_head, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 554240\n\n\n\nbs = 8\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 60\n\nxb = torch.randn(bs, c_in, seq_len)\n\n# standardize by channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\n# Settings\nmax_seq_len = 120\nd_model = 128\nn_heads = 16\nd_k = d_v = None # if None --> d_model // n_heads\nd_ff = 256\ndropout = 0.1\nact = \"gelu\"\nn_layers = 3\nfc_dropout = 0.1\npe='zeros'\nlearn_pe=True\nkwargs = {}\n# kwargs = dict(kernel_size=5, padding=2)\n\nmodel = TSTPlus(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, act=act, n_layers=n_layers,\n fc_dropout=fc_dropout, pe=pe, learn_pe=learn_pe, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\nbody, head = model[0], model[1]\ntest_eq(body.to(xb.device)(xb).ndim, 3)\ntest_eq(head.to(xb.device)(body.to(xb.device)(xb)).ndim, 2)\nhead\n\nmodel parameters: 421762\n\n\nSequential(\n (0): GELU(approximate='none')\n (1): fastai.layers.Flatten(full=False)\n (2): LinBnDrop(\n (0): Dropout(p=0.1, inplace=False)\n (1): Linear(in_features=7680, out_features=2, bias=True)\n )\n)\n\n\n\nmodel.show_pe()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nmodel = TSTPlus(3, 2, 10)\nxb = torch.randn(4, 3, 10)\nyb = torch.randint(0, 2, (4,))\ntest_eq(model.backbone._key_padding_mask(xb)[1], None)\nrandom_idxs = random_choice(len(xb), 2, False)\nxb[random_idxs, :, -5:] = np.nan\nxb[random_idxs, 0, 1] = np.nan\ntest_eq(model.backbone._key_padding_mask(xb.clone())[1].data, (torch.isnan(xb).float().mean(1)==1).bool())\ntest_eq(model.backbone._key_padding_mask(xb.clone())[1].data.shape, (4,10))\nprint(torch.isnan(xb).sum())\npred = model.to(xb.device)(xb.clone())\nloss = CrossEntropyLossFlat()(pred, yb)\nloss.backward()\nmodel.to(xb.device).backbone._key_padding_mask(xb)[1].data.shape\n\ntensor(32)\n\n\ntorch.Size([4, 10])\n\n\n\nbs = 4\nc_in = 3\nseq_len = 10\nc_out = 2\nxb = torch.randn(bs, c_in, seq_len)\nxb[:, -1] = torch.randint(0, 2, (bs, seq_len)).sort()[0]\nmodel = TSTPlus(c_in, c_out, seq_len).to(xb.device)\ntest_eq(model.backbone._key_padding_mask(xb)[1], None)\nmodel = TSTPlus(c_in, c_out, seq_len, padding_var=-1).to(xb.device)\ntest_eq(model.backbone._key_padding_mask(xb)[1], (xb[:, -1]==1))\nmodel = TSTPlus(c_in, c_out, seq_len, padding_var=2).to(xb.device)\ntest_eq(model.backbone._key_padding_mask(xb)[1], (xb[:, -1]==1))\ntest_eq(model(xb).shape, (bs, 
c_out))\n\n\nbs = 4\nc_in = 3\nseq_len = 10\nc_out = 2\nxb = torch.randn(bs, c_in, seq_len)\nmodel = TSTPlus(c_in, c_out, seq_len, act='smelu')\n\n\nsource\n\n\nMultiTSTPlus\n\n MultiTSTPlus (feat_list, c_out, seq_len, max_seq_len:Optional[int]=512,\n custom_head=None, n_layers:int=3, d_model:int=128,\n n_heads:int=16, d_k:Optional[int]=None,\n d_v:Optional[int]=None, d_ff:int=256, norm:str='BatchNorm',\n attn_dropout:float=0.0, dropout:float=0.0, act:str='gelu',\n key_padding_mask:bool='auto',\n padding_var:Optional[int]=None,\n attn_mask:Optional[torch.Tensor]=None,\n res_attention:bool=True, pre_norm:bool=False,\n store_attn:bool=False, pe:str='zeros', learn_pe:bool=True,\n flatten:bool=True, fc_dropout:float=0.0,\n concat_pool:bool=False, bn:bool=False,\n y_range:Optional[tuple]=None, verbose:bool=False)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nbs = 8\nc_in = 7 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 10\nxb2 = torch.randn(bs, c_in, seq_len)\nmodel1 = MultiTSTPlus([2, 5], c_out, seq_len)\nmodel2 = MultiTSTPlus(7, c_out, seq_len)\ntest_eq(model1.to(xb2.device)(xb2).shape, (bs, c_out))\ntest_eq(model1.to(xb2.device)(xb2).shape, model2.to(xb2.device)(xb2).shape)\ntest_eq(count_parameters(model1) > count_parameters(model2), True)\n\n\nbs = 8\nc_in = 7 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 10\nxb2 = torch.randn(bs, c_in, seq_len)\nmodel1 = MultiTSTPlus([2, 5], c_out, seq_len, )\nmodel2 = MultiTSTPlus([[0,2,5], [0,1,3,4,6]], c_out, seq_len)\ntest_eq(model1.to(xb2.device)(xb2).shape, (bs, c_out))\ntest_eq(model1.to(xb2.device)(xb2).shape, model2.to(xb2.device)(xb2).shape)\n\n\nmodel1 = MultiTSTPlus([2, 5], c_out, seq_len, y_range=(0.5, 5.5))\nbody, head = split_model(model1)\ntest_eq(body.to(xb2.device)(xb2).ndim, 3)\ntest_eq(head.to(xb2.device)(body.to(xb2.device)(xb2)).ndim, 2)\nhead\n\nSequential(\n (0): Sequential(\n (0): GELU(approximate='none')\n (1): fastai.layers.Flatten(full=False)\n (2): LinBnDrop(\n (0): Linear(in_features=2560, out_features=2, bias=True)\n )\n )\n)\n\n\n\nmodel = MultiTSTPlus([2, 5], c_out, seq_len, pre_norm=True)\n\n\nbs = 8\nn_vars = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\nnet = MultiTSTPlus(n_vars, c_out, seq_len)\nchange_model_head(net, create_pool_plus_head, concat_pool=False)\nprint(net.to(xb.device)(xb).shape)\nnet.head\n\ntorch.Size([8, 2])\n\n\nSequential(\n (0): AdaptiveAvgPool1d(output_size=1)\n (1): Reshape(bs)\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): Linear(in_features=128, out_features=512, bias=False)\n (4): ReLU(inplace=True)\n (5): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (6): Linear(in_features=512, out_features=2, bias=False)\n)\n\n\n\nbs = 8\nn_vars = 3\nseq_len = 12\nc_out = 10\nxb = torch.rand(bs, n_vars, seq_len)\nnew_head = partial(conv_lin_nd_head, d=(5 ,2))\nnet = MultiTSTPlus(n_vars, c_out, seq_len, custom_head=new_head)\nprint(net.to(xb.device)(xb).shape)\nnet.head\n\ntorch.Size([8, 5, 2, 10])\n\n\nSequential(\n (0): create_conv_lin_nd_head(\n (0): Conv1d(128, 10, kernel_size=(1,), stride=(1,))\n (1): Linear(in_features=12, out_features=10, bias=True)\n (2): Transpose(-1, -2)\n (3): Reshape(bs, 5, 2, 10)\n )\n)", + "crumbs": [ + "Models", + "Transformers", + "TSTPlus" + ] + }, + { + "objectID": "tutorials.html", + "href": "tutorials.html", + "title": "Tutorial notebooks", + "section": "", + "text": "A number of tutorials have been created to help you get started to use tsai with time series data. Please, feel free to open the notebooks (you can open them in Colab if you want) and tweak them to do your own experiments.", + "crumbs": [ + "Tutorial notebooks" + ] + }, + { + "objectID": "tutorials.html#time-series-classification-using-raw-data", + "href": "tutorials.html#time-series-classification-using-raw-data", + "title": "Tutorial notebooks", + "section": "Time series classification (using raw data)", + "text": "Time series classification (using raw data)\nI’d recommend you to start with:\n\nIntroduction to Time Series Classification. 
This notebook contains a detailed walk through the steps to perform time series classification.\n\n\nData preparation:\nIf you need help preparing your data you may find the following tutorials useful:\n\nTime Series data preparation: this will show how you can do classify both univariate or multivariate time series.\nHow to work with (very) large numpy arrays in tsai?\nHow to use numpy arrays in tsai?\n\nThese last 2 provide more details in case you need them. They explain how datasets and dataloaders are created.\n\n\nTypes of architectures:\nOnce you feel comfortable, you can start exploring different types of architectures:\n\nYou can use the Time Series data preparation notebook and replace the InceptionTime architecture by any other of your choice:\n\nMLPs\nRNNs (LSTM, GRU)\nCNNs (FCN, ResNet, XResNet)\nWavelet-based architectures\nTransformers (like TST - 2020)\nThey all (except ROCKET) work in the same way, for univariate or multivariate time series.\n\nHow to use Transformers with Time Series? may also help you understand how to successfully apply this new type of architecture to time series.\nYou can also use Time Series Classification Benchmark to perform bechmarks with different architectures and/ or configurations.\n\nROCKET (2019) is a new technique used to generate 10-20k features from time series. These features are used in a different classifier. This is the only implementation I’m aware of that uses GPU and allows both univariate and multivariate time series. To explain this method that works very well in many cases you can use the following notebook:\n\nROCKET: a new state-of-the-art time series classifier\n\nThere are many types of classifiers as you can see, and it’s very difficult to know in advance which one will perform well in our task. However, the ones that have consistently deliver the best results in recent benchmark studies are Inceptiontime (Fawaz, 2019) and ROCKET (Dempster, 2019). Transformers, like TST (Zerveas, 2020), also show a lot of promise, but the application to time series data is so new that they have not been benchmarked against other architectures. But I’d say these are 3 architectures you should know well.", + "crumbs": [ + "Tutorial notebooks" + ] + }, + { + "objectID": "tutorials.html#time-series-classification-using-time-series-images", + "href": "tutorials.html#time-series-classification-using-time-series-images", + "title": "Tutorial notebooks", + "section": "Time series classification (using time series images)", + "text": "Time series classification (using time series images)\nIn these tutorials, I’ve also included a section on how to transform time series into images. This will allow you to then use DL vision models like ResNet50 for example. This approach works very well in some cases, even if you have limited data. You can learn about this technique in this notebook:\n\nImaging time series", + "crumbs": [ + "Tutorial notebooks" + ] + }, + { + "objectID": "tutorials.html#time-series-regression", + "href": "tutorials.html#time-series-regression", + "title": "Tutorial notebooks", + "section": "Time series regression", + "text": "Time series regression\nI’ve also included an example of how you can perform time series regression with your time series using tsai. In this case, the label will be continuous, instead of a category. But as you will see, the use is almost identical to time series classification. 
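To make the "almost identical to time series classification" point concrete, here is a minimal regression sketch. It is an illustrative assumption rather than the content of the linked notebook: the data is synthetic random data, and InceptionTimePlus with an MAE metric is just one reasonable choice. It reuses the same get_ts_dls / ts_learner workflow shown elsewhere in these docs, swapping the categorical target transform for TSRegression and a continuous target.

from tsai.basics import *                              # get_ts_dls, ts_learner, TSStandardize, TSRegression, TimeSplitter
from tsai.models.InceptionTimePlus import InceptionTimePlus
from fastai.metrics import mae
import numpy as np

# synthetic data (assumed for illustration): 100 samples, 3 variables, 50 steps, continuous target
X = np.random.rand(100, 3, 50).astype(np.float32)
y = np.random.rand(100).astype(np.float32)             # continuous label instead of a category
splits = TimeSplitter(show_plot=False)(y)

tfms = [None, TSRegression()]                          # TSRegression casts the target to float
batch_tfms = TSStandardize(by_var=True)
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=16)

learn = ts_learner(dls, InceptionTimePlus, metrics=mae)  # same learner API as in classification
learn.fit_one_cycle(1, 1e-3)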
You can learn more about this here:\n\nTime series regression", + "crumbs": [ + "Tutorial notebooks" + ] + }, + { + "objectID": "tutorials.html#visualization", + "href": "tutorials.html#visualization", + "title": "Tutorial notebooks", + "section": "Visualization", + "text": "Visualization\nI’ve also created PredictionDynamics callback that allows you to visualize the model’s predictions while it’s training. It can provide you some additional insights that may be useful to improve your model. Here’s the notebook:\n\nPredictionDynamics\n\nI hope you will find these tutorial useful. I’m planning to add more tutorials to demonstrate new techniques, models, etc when they become available. So stay tuned!", + "crumbs": [ + "Tutorial notebooks" + ] + }, + { + "objectID": "models.gatedtabtransformer.html", + "href": "models.gatedtabtransformer.html", + "title": "GatedTabTransformer", + "section": "", + "text": "This implementation is based on:\n\nCholakov, R., & Kolev, T. (2022). The GatedTabTransformer. An enhanced deep learning architecture for tabular modeling. arXiv preprint arXiv:2201.00199. arXiv preprint https://arxiv.org/abs/2201.00199\nHuang, X., Khetan, A., Cvitkovic, M., & Karnin, Z. (2020). TabTransformer: Tabular Data Modeling Using Contextual Embeddings. arXiv preprint https://arxiv.org/pdf/2012.06678\n\nOfficial repo: https://github.com/radi-cho/GatedTabTransformer\n\nsource\n\nGatedTabTransformer\n\n GatedTabTransformer (classes, cont_names, c_out, column_embed=True,\n add_shared_embed=False, shared_embed_div=8,\n embed_dropout=0.1, drop_whole_embed=False,\n d_model=32, n_layers=6, n_heads=8, d_k=None,\n d_v=None, d_ff=None, res_attention=True,\n attention_act='gelu', res_dropout=0.1,\n norm_cont=True, mlp_d_model=32, mlp_d_ffn=64,\n mlp_layers=4)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. 
:vartype training: bool\n\nfrom fastcore.test import test_eq\nfrom fastcore.basics import first\nfrom fastai.data.external import untar_data, URLs\nfrom fastai.tabular.data import TabularDataLoaders\nfrom fastai.tabular.core import Categorify, FillMissing\nfrom fastai.data.transforms import Normalize\nimport pandas as pd\n\n\npath = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')\ndls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names=\"salary\",\n cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'],\n cont_names = ['age', 'fnlwgt', 'education-num'],\n procs = [Categorify, FillMissing, Normalize])\nx_cat, x_cont, yb = first(dls.train)\nmodel = GatedTabTransformer(dls.classes, dls.cont_names, dls.c)\ntest_eq(model(x_cat, x_cont).shape, (dls.train.bs, dls.c))", + "crumbs": [ + "Models", + "Tabular models", + "GatedTabTransformer" + ] + }, + { + "objectID": "models.xcmplus.html", + "href": "models.xcmplus.html", + "title": "XCMPlus", + "section": "", + "text": "This is an unofficial PyTorch implementation of XCM created by Ignacio Oguiza (oguiza@timeseriesAI.co).\n\nsource\n\nXCMPlus\n\n XCMPlus (c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128,\n window_perc:float=1.0, flatten:bool=False, custom_head:<built-\n infunctioncallable>=None, concat_pool:bool=False,\n fc_dropout:float=0.0, bn:bool=False, y_range:tuple=None,\n **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nfrom tsai.data.basics import *\nfrom tsai.learner import *\n\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, split_data=False)\ntfms = [None, TSCategorize()]\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms)\nmodel = XCMPlus(dls.vars, dls.c, dls.len)\nlearn = ts_learner(dls, model, metrics=accuracy)\nxb, yb = dls.one_batch()\n\nbs, c_in, seq_len = xb.shape\nc_out = len(np.unique(yb.cpu().numpy()))\n\nmodel = XCMPlus(c_in, c_out, seq_len, fc_dropout=.5)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel = XCMPlus(c_in, c_out, seq_len, concat_pool=True)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel = XCMPlus(c_in, c_out, seq_len)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel\n\nXCMPlus(\n (backbone): _XCMPlus_Backbone(\n (conv2dblock): Sequential(\n (0): Unsqueeze(dim=1)\n (1): Conv2dSame(\n (conv2d_same): Conv2d(1, 128, kernel_size=(1, 51), stride=(1, 1))\n )\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (conv2d1x1block): Sequential(\n (0): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))\n (1): ReLU()\n (2): Squeeze(dim=1)\n )\n (conv1dblock): Sequential(\n (0): Conv1d(24, 128, kernel_size=(51,), stride=(1,), padding=(25,))\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (conv1d1x1block): Sequential(\n (0): Conv1d(128, 1, kernel_size=(1,), stride=(1,))\n (1): ReLU()\n )\n (concat): Concat(dim=1)\n (conv1d): Sequential(\n (0): Conv1d(25, 128, kernel_size=(51,), stride=(1,), padding=(25,))\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n )\n (head): Sequential(\n (0): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n (1): LinBnDrop(\n (0): Linear(in_features=128, out_features=6, bias=True)\n )\n )\n)\n\n\n\nmodel.show_gradcam(xb, yb)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nmodel.show_gradcam(xb[0], yb[0])\n\n[W NNPACK.cpp:53] Could not initialize NNPACK! 
Reason: Unsupported hardware.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nc_out = 1\nxb = torch.rand(bs, n_vars, seq_len)\nnew_head = partial(conv_lin_nd_head, d=(5, 2))\nnet = XCMPlus(n_vars, c_out, seq_len, custom_head=new_head)\nprint(net.to(xb.device)(xb).shape)\nnet.head\n\ntorch.Size([16, 5, 2])\n\n\ncreate_conv_lin_nd_head(\n (0): Conv1d(128, 1, kernel_size=(1,), stride=(1,))\n (1): Linear(in_features=12, out_features=10, bias=True)\n (2): Transpose(-1, -2)\n (3): Reshape(bs, 5, 2)\n)\n\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\nnet = XCMPlus(n_vars, c_out, seq_len)\nchange_model_head(net, create_pool_plus_head, concat_pool=False)\nprint(net.to(xb.device)(xb).shape)\nnet.head\n\ntorch.Size([16, 2])\n\n\nSequential(\n (0): AdaptiveAvgPool1d(output_size=1)\n (1): Reshape(bs)\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): Linear(in_features=128, out_features=512, bias=False)\n (4): ReLU(inplace=True)\n (5): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (6): Linear(in_features=512, out_features=2, bias=False)\n)", + "crumbs": [ + "Models", + "CNNs", + "XCMPlus" + ] + }, + { + "objectID": "models.minirocket_pytorch.html", + "href": "models.minirocket_pytorch.html", + "title": "MINIROCKET Pytorch", + "section": "", + "text": "A Very Fast (Almost) Deterministic Transform for Time Series Classification.\n\nThis is a Pytorch implementation of MiniRocket developed by Malcolm McLean and Ignacio Oguiza based on:\nDempster, A., Schmidt, D. F., & Webb, G. I. (2020). MINIROCKET: A Very Fast (Almost) Deterministic Transform for Time Series Classification. arXiv preprint arXiv:2012.08791.\nOriginal paper: https://arxiv.org/abs/2012.08791\nOriginal code: https://github.com/angus924/minirocket\n\nsource\n\nMiniRocketFeatures\n\n MiniRocketFeatures (c_in, seq_len, num_features=10000,\n max_dilations_per_kernel=32, random_state=None)\n\nThis is a Pytorch implementation of MiniRocket developed by Malcolm McLean and Ignacio Oguiza\nMiniRocket paper citation: @article{dempster_etal_2020, author = {Dempster, Angus and Schmidt, Daniel F and Webb, Geoffrey I}, title = {{MINIROCKET}: A Very Fast (Almost) Deterministic Transform for Time Series Classification}, year = {2020}, journal = {arXiv:2012.08791} } Original paper: https://arxiv.org/abs/2012.08791 Original code: https://github.com/angus924/minirocket\n\nsource\n\n\nget_minirocket_features\n\n get_minirocket_features (o, model, chunksize=1024, use_cuda=None,\n to_np=True)\n\nFunction used to split a large dataset into chunks, avoiding OOM error.\n\nsource\n\n\nMiniRocketHead\n\n MiniRocketHead (c_in, c_out, seq_len=1, bn=True, fc_dropout=0.0)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nMiniRocket\n\n MiniRocket (c_in, c_out, seq_len, num_features=10000,\n max_dilations_per_kernel=32, random_state=None, bn=True,\n fc_dropout=0)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. 
Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nfrom tsai.imports import default_device\nfrom fastai.metrics import accuracy\nfrom fastai.callback.tracker import ReduceLROnPlateau\nfrom tsai.data.all import *\nfrom tsai.learner import *\n\n\n# Offline feature calculation\ndsid = 'ECGFiveDays'\nX, y, splits = get_UCR_data(dsid, split_data=False)\nmrf = MiniRocketFeatures(c_in=X.shape[1], seq_len=X.shape[2]).to(default_device())\nX_train = X[splits[0]] # X_train may either be a np.ndarray or a torch.Tensor\nmrf.fit(X_train)\nX_tfm = get_minirocket_features(X, mrf)\ntfms = [None, TSClassification()]\nbatch_tfms = TSStandardize(by_var=True)\ndls = get_ts_dls(X_tfm, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=256)\nlearn = ts_learner(dls, MiniRocketHead, metrics=accuracy)\nlearn.fit(1, 1e-4, cbs=ReduceLROnPlateau(factor=0.5, min_lr=1e-8, patience=10))\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n0.693147\n0.530879\n0.752613\n00:00\n\n\n\n\n\n\n# Online feature calculation\ndsid = 'ECGFiveDays'\nX, y, splits = get_UCR_data(dsid, split_data=False)\ntfms = [None, TSClassification()]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=256)\nlearn = ts_learner(dls, MiniRocket, metrics=accuracy)\nlearn.fit_one_cycle(1, 1e-2)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n0.693147\n0.713297\n0.502904\n00:06", + "crumbs": [ + "Models", + "ROCKETs", + "MINIROCKET Pytorch" + ] + }, + { + "objectID": "models.rnn.html", + "href": "models.rnn.html", + "title": "RNNs", + "section": "", + "text": "These are RNN, LSTM and GRU PyTorch implementations created by Ignacio Oguiza - oguiza@timeseriesAI.co\nsource", + "crumbs": [ + "Models", + "RNNs", + "RNNs" + ] + }, + { + "objectID": "models.rnn.html#converting-a-model-to-torchscript", + "href": "models.rnn.html#converting-a-model-to-torchscript", + "title": "RNNs", + "section": "Converting a model to TorchScript", + "text": "Converting a model to TorchScript\n\nmodel = LSTM(c_in, c_out, hidden_size=100, n_layers=2, bidirectional=True, rnn_dropout=.5, fc_dropout=.5)\nmodel.eval()\ninp = torch.rand(1, c_in, 50)\noutput = model(inp)\nprint(output)\n\ntensor([[-0.0287, -0.0105]], grad_fn=<AddmmBackward0>)\n\n\n\nTracing\n\n# save to gpu, cpu or both\ntraced_cpu = torch.jit.trace(model.cpu(), inp)\nprint(traced_cpu)\ntorch.jit.save(traced_cpu, \"cpu.pt\")\n\n# load cpu or gpu model\ntraced_cpu = torch.jit.load(\"cpu.pt\")\ntest_eq(traced_cpu(inp), output)\n\n!rm \"cpu.pt\"\n\nLSTM(\n original_name=LSTM\n (rnn): LSTM(original_name=LSTM)\n (dropout): Dropout(original_name=Dropout)\n (fc): Linear(original_name=Linear)\n)\n\n\n\n\nScripting\n\n# save to gpu, cpu or both\nscripted_cpu = torch.jit.script(model.cpu())\nprint(scripted_cpu)\ntorch.jit.save(scripted_cpu, \"cpu.pt\")\n\n# load cpu or gpu model\nscripted_cpu = torch.jit.load(\"cpu.pt\")\ntest_eq(scripted_cpu(inp), output)\n\n!rm \"cpu.pt\"\n\nRecursiveScriptModule(\n original_name=LSTM\n (rnn): RecursiveScriptModule(original_name=LSTM)\n (dropout): RecursiveScriptModule(original_name=Dropout)\n (fc): 
RecursiveScriptModule(original_name=Linear)\n)", + "crumbs": [ + "Models", + "RNNs", + "RNNs" + ] + }, + { + "objectID": "models.rnn.html#converting-a-model-to-onnx", + "href": "models.rnn.html#converting-a-model-to-onnx", + "title": "RNNs", + "section": "Converting a model to ONNX", + "text": "Converting a model to ONNX\nimport onnx\n\n# Export the model\ntorch.onnx.export(model.cpu(), # model being run\n inp, # model input (or a tuple for multiple inputs)\n \"cpu.onnx\", # where to save the model (can be a file or file-like object)\n export_params=True, # store the trained parameter weights inside the model file\n verbose=False,\n opset_version=13, # the ONNX version to export the model to\n do_constant_folding=True, # whether to execute constant folding for optimization\n input_names = ['input'], # the model's input names\n output_names = ['output'], # the model's output names\n dynamic_axes={\n 'input' : {0 : 'batch_size'}, \n 'output' : {0 : 'batch_size'}} # variable length axes\n )\n\n# Load the model and check it's ok\nonnx_model = onnx.load(\"cpu.onnx\")\nonnx.checker.check_model(onnx_model)\n\n# You can ignore the WARNINGS below\nimport onnxruntime as ort\n\nort_sess = ort.InferenceSession('cpu.onnx')\nout = ort_sess.run(None, {'input': inp.numpy()})\n\n# input & output names\ninput_name = ort_sess.get_inputs()[0].name\noutput_name = ort_sess.get_outputs()[0].name\n\n# input dimensions\ninput_dims = ort_sess.get_inputs()[0].shape\nprint(input_name, output_name, input_dims)\n\ntest_close(out, output.detach().numpy())\n!rm \"cpu.onnx\"", + "crumbs": [ + "Models", + "RNNs", + "RNNs" + ] + }, + { + "objectID": "models.xcm.html", + "href": "models.xcm.html", + "title": "XCM", + "section": "", + "text": "An Explainable Convolutional Neural Network for Multivariate Time Series Classification\n\nThis is an unofficial PyTorch implementation of XCM created by Ignacio Oguiza (oguiza@timeseriesAI.co)\n\nsource\n\nXCM\n\n XCM (c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128,\n window_perc:float=1.0, flatten:bool=False, custom_head:<built-\n infunctioncallable>=None, concat_pool:bool=False,\n fc_dropout:float=0.0, bn:bool=False, y_range:tuple=None, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nfrom tsai.data.basics import *\nfrom tsai.learner import *\n\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, split_data=False)\ntfms = [None, TSCategorize()]\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms)\nmodel = XCM(dls.vars, dls.c, dls.len)\nlearn = ts_learner(dls, model, metrics=accuracy)\nxb, yb = dls.one_batch()\n\nbs, c_in, seq_len = xb.shape\nc_out = len(np.unique(yb.cpu().numpy()))\n\nmodel = XCM(c_in, c_out, seq_len, fc_dropout=.5)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel = XCM(c_in, c_out, seq_len, concat_pool=True)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel = XCM(c_in, c_out, seq_len)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel\n\nXCM(\n (conv2dblock): Sequential(\n (0): Unsqueeze(dim=1)\n (1): Conv2dSame(\n (conv2d_same): Conv2d(1, 128, kernel_size=(1, 51), stride=(1, 1))\n )\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (conv2d1x1block): Sequential(\n (0): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))\n (1): ReLU()\n (2): Squeeze(dim=1)\n )\n (conv1dblock): Sequential(\n (0): Conv1d(24, 128, kernel_size=(51,), stride=(1,), padding=(25,))\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n (2): ReLU()\n )\n (conv1d1x1block): Sequential(\n (0): Conv1d(128, 1, kernel_size=(1,), stride=(1,))\n (1): ReLU()\n )\n (concat): Concat(dim=1)\n (conv1d): Sequential(\n (0): Conv1d(25, 128, kernel_size=(51,), stride=(1,), padding=(25,))\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (head): Sequential(\n (0): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n (1): LinBnDrop(\n (0): Linear(in_features=128, out_features=6, bias=True)\n )\n )\n)\n\n\n\nmodel.show_gradcam(xb, yb)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nmodel.show_gradcam(xb[0], yb[0])\n\n[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nc_out = 10\nxb = torch.rand(bs, n_vars, seq_len)\nnew_head = partial(conv_lin_nd_head, d=(5, 2))\nnet = XCM(n_vars, c_out, seq_len, custom_head=new_head)\nprint(net.to(xb.device)(xb).shape)\nnet.head\n\ntorch.Size([16, 5, 2, 10])\n\n\ncreate_conv_lin_nd_head(\n (0): Conv1d(128, 10, kernel_size=(1,), stride=(1,))\n (1): Linear(in_features=12, out_features=10, bias=True)\n (2): Transpose(-1, -2)\n (3): Reshape(bs, 5, 2, 10)\n)\n\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\nnet = XCM(n_vars, c_out, seq_len)\nchange_model_head(net, create_pool_plus_head, concat_pool=False)\nprint(net.to(xb.device)(xb).shape)\nnet.head\n\ntorch.Size([16, 2])\n\n\nSequential(\n (0): AdaptiveAvgPool1d(output_size=1)\n (1): Reshape(bs)\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): Linear(in_features=128, out_features=512, bias=False)\n (4): ReLU(inplace=True)\n (5): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (6): Linear(in_features=512, out_features=2, bias=False)\n)", + "crumbs": [ + "Models", + "CNNs", + "XCM" + ] + }, + { + "objectID": "models.tsitplus.html", + "href": "models.tsitplus.html", + "title": "TSiT", + "section": "", + "text": "This is a PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on ViT (Vision Transformer):\nDosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., … & Houlsby, N. (2020).\nAn image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929.\nsource", + "crumbs": [ + "Models", + "Transformers", + "TSiT" + ] + }, + { + "objectID": "models.tsitplus.html#feature-extractor", + "href": "models.tsitplus.html#feature-extractor", + "title": "TSiT", + "section": "Feature extractor", + "text": "Feature extractor\nIt’s a known fact that transformers cannot be directly applied to long sequences. To avoid this, we have included a way to subsample the sequence to generate a more manageable input.\n\nfrom tsai.data.validation import get_splits\nfrom tsai.data.core import get_ts_dls\n\n\nX = np.zeros((10, 3, 5000)) \ny = np.random.randint(0,2,X.shape[0])\nsplits = get_splits(y)\ndls = get_ts_dls(X, y, splits=splits)\nxb, yb = dls.train.one_batch()\nxb\n\n\n\n\n\n\n\n\nTSTensor(samples:8, vars:3, len:5000, device=cpu, dtype=torch.float32)\n\n\nIf you try to use TSiTPlus, it’s likely you’ll get an ‘out-of-memory’ error.\nTo avoid this you can subsample the sequence reducing the input’s length. This can be done in multiple ways. 
Here are a few examples:\n\n# Separable convolution (to avoid mixing channels)\nfeature_extractor = Conv1d(xb.shape[1], xb.shape[1], ks=100, stride=50, padding=0, groups=xb.shape[1]).to(default_device())\nfeature_extractor.to(xb.device)(xb).shape\n\ntorch.Size([8, 3, 99])\n\n\n\n# Convolution (if you want to mix channels or change number of channels)\nfeature_extractor=MultiConv1d(xb.shape[1], 64, kss=[1,3,5,7,9], keep_original=True).to(default_device())\ntest_eq(feature_extractor.to(xb.device)(xb).shape, (xb.shape[0], 64, xb.shape[-1]))\n\n\n# MaxPool\nfeature_extractor = nn.Sequential(Pad1d((0, 50), 0), nn.MaxPool1d(kernel_size=100, stride=50)).to(default_device())\nfeature_extractor.to(xb.device)(xb).shape\n\ntorch.Size([8, 3, 100])\n\n\n\n# AvgPool\nfeature_extractor = nn.Sequential(Pad1d((0, 50), 0), nn.AvgPool1d(kernel_size=100, stride=50)).to(default_device())\nfeature_extractor.to(xb.device)(xb).shape\n\ntorch.Size([8, 3, 100])\n\n\nOnce you decide what type of transform you want to apply, you just need to pass the layer as the feature_extractor attribute:\n\nbs = 16\nnvars = 4\nseq_len = 1000\nc_out = 2\nd_model = 128\n\nxb = torch.rand(bs, nvars, seq_len)\nfeature_extractor = partial(Conv1d, ks=5, stride=3, padding=0, groups=xb.shape[1])\nmodel = TSiTPlus(nvars, c_out, seq_len, d_model=d_model, feature_extractor=feature_extractor)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))", + "crumbs": [ + "Models", + "Transformers", + "TSiT" + ] + }, + { + "objectID": "models.tsitplus.html#categorical-variables", + "href": "models.tsitplus.html#categorical-variables", + "title": "TSiT", + "section": "Categorical variables", + "text": "Categorical variables\n\nfrom tsai.utils import alphabet, ALPHABET\n\n\na = alphabet[np.random.randint(0,3,40)]\nb = ALPHABET[np.random.randint(6,10,40)]\nc = np.random.rand(40).reshape(4,1,10)\nmap_a = {k:v for v,k in enumerate(np.unique(a))}\nmap_b = {k:v for v,k in enumerate(np.unique(b))}\nn_cat_embeds = [len(m.keys()) for m in [map_a, map_b]]\nszs = [emb_sz_rule(n) for n in n_cat_embeds]\na = np.asarray(a.map(map_a)).reshape(4,1,10)\nb = np.asarray(b.map(map_b)).reshape(4,1,10)\ninp = torch.from_numpy(np.concatenate((c,a,b), 1)).float()\nfeature_extractor = partial(Conv1d, ks=3, padding='same')\nmodel = TSiTPlus(3, 2, 10, d_model=64, cat_pos=[1,2], feature_extractor=feature_extractor)\ntest_eq(model(inp).shape, (4,2))\n\n[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.", + "crumbs": [ + "Models", + "Transformers", + "TSiT" + ] + }, + { + "objectID": "models.tsitplus.html#sequence-embedding", + "href": "models.tsitplus.html#sequence-embedding", + "title": "TSiT", + "section": "Sequence Embedding", + "text": "Sequence Embedding\nSometimes you have a samples with a very long sequence length. In those cases you may want to reduce it’s length before passing it to the transformer. 
To do that you may just pass a token_size like in this example:\n\nt = torch.rand(8, 2, 10080)\nSeqTokenizer(2, 128, 60)(t).shape\n\ntorch.Size([8, 128, 168])\n\n\n\nt = torch.rand(8, 2, 10080)\nmodel = TSiTPlus(2, 5, 10080, d_model=64, token_size=60)\nmodel(t).shape\n\ntorch.Size([8, 5])", + "crumbs": [ + "Models", + "Transformers", + "TSiT" + ] + }, + { + "objectID": "data.metadatasets.html", + "href": "data.metadatasets.html", + "title": "Metadataset", + "section": "", + "text": "A dataset of datasets\n\nThis functionality will allow you to create a dataset from data stores in multiple, smaller datasets.\nI’d like to thank both Thomas Capelle (https://github.com/tcapelle) and Xander Dunn (https://github.com/xanderdunn) for their contributions to make this code possible.\nThis functionality allows you to use multiple numpy arrays instead of a single one, which may be very useful in many practical settings. It’s been tested it with 10k+ datasets and it works well.\n\nsource\n\nTSMetaDatasets\n\n TSMetaDatasets (metadataset, splits)\n\nBase class for lists with subsets\n\nsource\n\n\nTSMetaDataset\n\n TSMetaDataset (dataset_list, **kwargs)\n\nInitialize self. See help(type(self)) for accurate signature.\nLet’s create 3 datasets. In this case they will have different sizes.\n\nvocab = alphabet[:10]\ndsets = []\nfor i in range(3):\n size = np.random.randint(50, 150)\n X = torch.rand(size, 5, 50)\n y = vocab[torch.randint(0, 10, (size,))]\n tfms = [None, TSClassification(vocab=vocab)]\n dset = TSDatasets(X, y, tfms=tfms)\n dsets.append(dset)\n\n\n\nmetadataset = TSMetaDataset(dsets)\nsplits = TimeSplitter(show_plot=False)(metadataset)\nmetadatasets = TSMetaDatasets(metadataset, splits=splits)\ndls = TSDataLoaders.from_dsets(metadatasets.train, metadatasets.valid)\nxb, yb = dls.train.one_batch()\nxb, yb\n\n(TSTensor(samples:64, vars:5, len:50, device=cpu, dtype=torch.float32),\n TensorCategory([1, 0, 3, 9, 7, 2, 8, 6, 1, 1, 1, 8, 1, 1, 9, 2, 6, 6, 1, 5, 5,\n 6, 9, 2, 7, 1, 6, 4, 9, 2, 5, 0, 4, 9, 1, 4, 4, 6, 0, 8, 8, 5,\n 8, 6, 9, 0, 8, 8, 6, 4, 8, 9, 7, 3, 4, 7, 7, 8, 6, 2, 3, 0, 7,\n 4]))\n\n\nYou can train metadatasets as you would train any other time series model in tsai:\nlearn = ts_learner(dls, arch=\"TSTPlus\")\nlearn.fit_one_cycle(1)\nlearn.export(\"test.pkl\")\nFor inference, you should create the new metadatasets using the same method you used when you trained it. 
The you use fastai’s learn.get_preds method to generate predictions:\nvocab = alphabet[:10]\ndsets = []\nfor i in range(3):\n size = np.random.randint(50, 150)\n X = torch.rand(size, 5, 50)\n y = vocab[torch.randint(0, 10, (size,))]\n tfms = [None, TSClassification(vocab=vocab)]\n dset = TSDatasets(X, y, tfms=tfms)\n dsets.append(dset)\nmetadataset = TSMetaDataset(dsets)\ndl = TSDataLoader(metadataset)\n\n\nlearn = load_learner(\"test.pkl\")\nlearn.get_preds(dl=dl)\nThere also en easy way to map any particular sample in a batch to the original dataset and id:\n\ndls = TSDataLoaders.from_dsets(metadatasets.train, metadatasets.valid)\nxb, yb = first(dls.train)\nmappings = dls.train.dataset.mapping_idxs\nfor i, (xbi, ybi) in enumerate(zip(xb, yb)):\n ds, idx = mappings[i]\n test_close(dsets[ds][idx][0].data.cpu(), xbi.cpu())\n test_close(dsets[ds][idx][1].data.cpu(), ybi.cpu())\n\nFor example the 3rd sample in this batch would be:\n\ndls.train.dataset.mapping_idxs[2]\n\narray([ 0, 112], dtype=int32)", + "crumbs": [ + "Data", + "Metadataset" + ] + }, + { + "objectID": "models.multiinputnet.html", + "href": "models.multiinputnet.html", + "title": "MultiInputNet", + "section": "", + "text": "This is an implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co).\nIt can be used to combine different types of deep learning models into a single one that will accept multiple inputs from a MixedDataLoaders.\n\nsource\n\nMultiInputNet\n\n MultiInputNet (*models, c_out=None, reshape_fn=None, multi_output=False,\n custom_head=None, device=None, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nfrom tsai.basics import *\nfrom tsai.data.all import *\nfrom tsai.models.utils import *\nfrom tsai.models.InceptionTimePlus import *\nfrom tsai.models.TabModel import *\n\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, split_data=False)\nts_features_df = get_ts_features(X, y)\n\nFeature Extraction: 100%|███████████████████████████████████████████| 40/40 [00:07<00:00, 5.23it/s]\n\n\n\n# raw ts\ntfms = [None, [TSCategorize()]]\nbatch_tfms = TSStandardize()\nts_dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\nts_model = build_ts_model(InceptionTimePlus, dls=ts_dls)\n\n# ts features\ncat_names = None\ncont_names = ts_features_df.columns[:-2]\ny_names = 'target'\ntab_dls = get_tabular_dls(ts_features_df, cat_names=cat_names, cont_names=cont_names, y_names=y_names, splits=splits)\ntab_model = build_tabular_model(TabModel, dls=tab_dls)\n\n# mixed\nmixed_dls = get_mixed_dls(ts_dls, tab_dls)\nMultiModalNet = MultiInputNet(ts_model, tab_model)\nlearn = Learner(mixed_dls, MultiModalNet, metrics=[accuracy, RocAuc()])\nlearn.fit_one_cycle(1, 1e-3)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\nroc_auc_score\ntime\n\n\n\n\n0\n1.780674\n1.571718\n0.477778\n0.857444\n00:05\n\n\n\n\n\n\n(ts, (cat, cont)),yb = mixed_dls.one_batch()\nlearn.model((ts, (cat, cont))).shape\n\ntorch.Size([64, 6])\n\n\n\ntab_dls.c, ts_dls.c, ts_dls.cat\n\n(6, 6, True)\n\n\n\nlearn.loss_func\n\nFlattenedLoss of CrossEntropyLoss()", + "crumbs": [ + "Models", + "Miscellaneous", + "MultiInputNet" + ] + }, + { + "objectID": "utils.html", + "href": "utils.html", + "title": "Utilities", + "section": "", + "text": "General helper functions used throughout the library\n\n\nsource\n\nrandom_rand\n\n random_rand (*d, dtype=None, out=None, seed=None)\n\nSame as np.random.rand but with a faster random generator, dtype and seed\n\nsource\n\n\nrandom_randint\n\n random_randint (low, high=None, 
size=None, dtype=<class 'int'>,\n endpoint=False, seed=None)\n\nSame as np.random.randint but with a faster random generator and seed\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nlow\n\n\nint, lower endpoint of interval (inclusive)\n\n\nhigh\nNoneType\nNone\nint, upper endpoint of interval (exclusive), or None for a single-argument form of low.\n\n\nsize\nNoneType\nNone\nint or tuple of ints, optional. Output shape.\n\n\ndtype\ntype\nint\ndata type of the output.\n\n\nendpoint\nbool\nFalse\nbool, optional. If True, high is an inclusive endpoint. If False, the range is open on the right.\n\n\nseed\nNoneType\nNone\nint or None, optional. Seed for the random number generator.\n\n\n\n\nsource\n\n\nrandom_choice\n\n random_choice (a, size=None, replace=True, p=None, axis=0, shuffle=True,\n dtype=None, seed=None)\n\nSame as np.random.choice but with a faster random generator, dtype and seed\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\na\n\n\n1-D array-like or int. The values from which to draw the samples.\n\n\nsize\nNoneType\nNone\nint or tuple of ints, optional. The shape of the output.\n\n\nreplace\nbool\nTrue\nbool, optional. Whether or not to allow the same value to be drawn multiple times.\n\n\np\nNoneType\nNone\n1-D array-like, optional. The probabilities associated with each entry in a.\n\n\naxis\nint\n0\nint, optional. The axis along which the samples are drawn.\n\n\nshuffle\nbool\nTrue\nbool, optional. Whether or not to shuffle the samples before returning them.\n\n\ndtype\nNoneType\nNone\ndata type of the output.\n\n\nseed\nNoneType\nNone\nint or None, optional. Seed for the random number generator.\n\n\n\n\na = random_choice(10, size=(2,3,4), replace=True, p=None, seed=1)\nb = random_choice(10, size=(2,3,4), replace=True, p=None, seed=1)\ntest_eq(a, b)\nc = random_choice(10, size=(2,3,4), replace=True, p=None, seed=2)\ntest_ne(a, c)\n\nassert random_choice(10, size=3, replace=True, p=None).shape == (3,)\nassert random_choice(10, size=(2,3,4), replace=True, p=None).shape == (2,3,4)\n\nprint(random_choice(10, size=3, replace=True, p=None))\nprint(random_choice(10, size=3, replace=False, p=None))\na = [2, 5, 4, 9, 13, 25, 56, 83, 99, 100]\nprint(random_choice(a, size=3, replace=False, p=None))\n\n[5 7 5]\n[0 1 6]\n[ 4 83 100]\n\n\n\na = random_randint(10, 20, 100, seed=1)\nb = random_randint(10, 20, 100, seed=1)\ntest_eq(a, b)\nc = random_randint(10, 20, 100, seed=2)\ntest_ne(a, c)\nassert (a >= 10).all() and (a < 20).all()\n\n\na = random_rand(2, 3, 4, seed=123)\nb = random_rand(2, 3, 4, seed=123)\ntest_eq(a, b)\nc = random_rand(2, 3, 4, seed=124)\ntest_ne(a, c)\nassert (a >= 0).all() and (a < 1).all()\n\na = random_rand(2, 3, 4)\na_copy = a.copy()\nrandom_rand(2, 3, 4, out=a)\ntest_ne(a, a_copy)\n\n\nsource\n\n\nis_slice\n\n is_slice (o)\n\n\nsource\n\n\nis_memmap\n\n is_memmap (o)\n\n\nsource\n\n\nis_dask\n\n is_dask (o)\n\n\nsource\n\n\nis_zarr\n\n is_zarr (o)\n\n\nsource\n\n\nis_tensor\n\n is_tensor (o)\n\n\nsource\n\n\nis_nparray\n\n is_nparray (o)\n\n\n# ensure these folders exist for testing purposes\nfns = ['data', 'export', 'models']\nfor fn in fns:\n path = Path('.')/fn\n if not os.path.exists(path): os.makedirs(path)\n\n\nsource\n\n\ntodtype\n\n todtype (dtype)\n\n\nsource\n\n\nto3dPlusArray\n\n to3dPlusArray (o)\n\n\nsource\n\n\nto3dPlusTensor\n\n to3dPlusTensor (o)\n\n\nsource\n\n\nto2dPlusArray\n\n to2dPlusArray (o)\n\n\nsource\n\n\nto2dPlusTensor\n\n to2dPlusTensor (o)\n\n\nsource\n\n\nto3dPlus\n\n to3dPlus (o)\n\n\nsource\n\n\nto2dPlus\n\n to2dPlus 
(o)\n\n\nsource\n\n\nto1d\n\n to1d (o)\n\n\nsource\n\n\nto2d\n\n to2d (o)\n\n\nsource\n\n\nto3d\n\n to3d (o)\n\n\nsource\n\n\nto1darray\n\n to1darray (o)\n\n\nsource\n\n\nto2darray\n\n to2darray (o)\n\n\nsource\n\n\nto3darray\n\n to3darray (o)\n\n\nsource\n\n\nto1dtensor\n\n to1dtensor (o)\n\n\nsource\n\n\nto2dtensor\n\n to2dtensor (o)\n\n\nsource\n\n\nto3dtensor\n\n to3dtensor (o)\n\n\nsource\n\n\ntoL\n\n toL (o)\n\n\nsource\n\n\ntoarray\n\n toarray (o)\n\n\nsource\n\n\ntotensor\n\n totensor (o)\n\n\na = np.random.rand(100).astype(np.float32)\nb = torch.from_numpy(a).float()\ntest_eq(totensor(a), b)\ntest_eq(a, toarray(b))\ntest_eq(to3dtensor(a).ndim, 3)\ntest_eq(to2dtensor(a).ndim, 2)\ntest_eq(to1dtensor(a).ndim, 1)\ntest_eq(to3darray(b).ndim, 3)\ntest_eq(to2darray(b).ndim, 2)\ntest_eq(to1darray(b).ndim, 1)\n\n\ndata = np.random.rand(10, 20)\ndf = pd.DataFrame(data)\ndf['target'] = np.random.randint(0, 3, len(df))\nX = df[df.columns[:-1]]\ny = df['target']\ntest_eq(to3darray(X).shape, (10, 1, 20))\ntest_eq(toarray(y).shape, (10,))\n\n\nsource\n\n\nget_file_size\n\n get_file_size (file_path:str, return_str:bool=True, decimals:int=2)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nfile_path\nstr\n\npath to file\n\n\nreturn_str\nbool\nTrue\nTrue returns size in human-readable format (KB, MB, GB, …). False in bytes.\n\n\ndecimals\nint\n2\nNumber of decimals in the output\n\n\n\n\nsource\n\n\nget_dir_size\n\n get_dir_size (dir_path:str, return_str:bool=True, decimals:int=2,\n verbose:bool=False)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndir_path\nstr\n\npath to directory\n\n\nreturn_str\nbool\nTrue\nTrue returns size in human-readable format (KB, MB, GB, …). False in bytes.\n\n\ndecimals\nint\n2\nNumber of decimals in the output\n\n\nverbose\nbool\nFalse\nControls verbosity\n\n\n\n\nsource\n\n\nget_size\n\n get_size (o, return_str=False, decimals=2)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\no\n\n\nAny python object\n\n\nreturn_str\nbool\nFalse\nTrue returns size in human-readable format (KB, MB, GB, …). 
False in bytes.\n\n\ndecimals\nint\n2\nNumber of decimals in the output\n\n\n\n\nsource\n\n\nbytes2str\n\n bytes2str (size_bytes:int, decimals=2)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nsize_bytes\nint\n\nNumber of bytes\n\n\ndecimals\nint\n2\nNumber of decimals in the output\n\n\nReturns\nstr\n\n\n\n\n\n\na = np.random.rand(10, 5, 3)\ntest_eq(get_size(a, True, 1), '1.2 KB')\n\n\nsource\n\n\nis_np_view\n\n is_np_view (o)\n\n\n\n\n\nDetails\n\n\n\n\no\na numpy array\n\n\n\n\na = np.array([1., 2., 3.])\ntest_eq(is_np_view(a), False)\ntest_eq(is_np_view(a[1:]), True)\n\n\nsource\n\n\nis_dir\n\n is_dir (path)\n\n\nsource\n\n\nis_file\n\n is_file (path)\n\n\ntest_eq(is_file(\"002_utils.ipynb\"), True)\ntest_eq(is_file(\"utils.ipynb\"), False)\n\n\nsource\n\n\ndelete_all_in_dir\n\n delete_all_in_dir (tgt_dir, exception=None)\n\n\nsource\n\n\nreverse_dict\n\n reverse_dict (dictionary)\n\n\nsource\n\n\nis_tuple\n\n is_tuple (o)\n\n\nsource\n\n\nitemify\n\n itemify (*o, tup_id=None)\n\n\na = [1, 2, 3]\nb = [4, 5, 6]\nprint(itemify(a, b))\ntest_eq(len(itemify(a, b)), len(a))\na = [1, 2, 3]\nb = None\nprint(itemify(a, b))\ntest_eq(len(itemify(a, b)), len(a))\na = [1, 2, 3]\nb = [4, 5, 6]\nc = None\nprint(itemify(a, b, c))\ntest_eq(len(itemify(a, b, c)), len(a))\n\n[(1, 4), (2, 5), (3, 6)]\n[(1,), (2,), (3,)]\n[(1, 4), (2, 5), (3, 6)]\n\n\n\nsource\n\n\nifelse\n\n ifelse (a, b, c)\n\nb if a is True else c\n\nsource\n\n\nexists\n\n exists (o)\n\n\nsource\n\n\nisnone\n\n isnone (o)\n\n\na = np.array(3)\ntest_eq(isnone(a), False)\ntest_eq(exists(a), True)\nb = None\ntest_eq(isnone(b), True)\ntest_eq(exists(b), False)\n\n\nsource\n\n\ntest_eq_nan\n\n test_eq_nan (a, b)\n\ntest that a==b excluding nan values (valid for torch.Tensor and np.ndarray)\n\nsource\n\n\ntest_error\n\n test_error (error, f, *args, **kwargs)\n\n\nsource\n\n\ntest_not_ok\n\n test_not_ok (f, *args, **kwargs)\n\n\nsource\n\n\ntest_ok\n\n test_ok (f, *args, **kwargs)\n\n\nsource\n\n\ntest_type\n\n test_type (a, b)\n\n\nsource\n\n\ntest_not_close\n\n test_not_close (a, b, eps=1e-05)\n\ntest that a is within eps of b\n\nsource\n\n\nis_not_close\n\n is_not_close (a, b, eps=1e-05)\n\nIs a within eps of b\n\nsource\n\n\nassert_fn\n\n assert_fn (*args, **kwargs)\n\n\nsource\n\n\ntest_le\n\n test_le (a, b)\n\ntest that a>b\n\nsource\n\n\ntest_lt\n\n test_lt (a, b)\n\ntest that a>b\n\nsource\n\n\ntest_ge\n\n test_ge (a, b)\n\ntest that a>=b\n\nsource\n\n\ntest_gt\n\n test_gt (a, b)\n\ntest that a>b\n\ntest_ok(test_gt, 5, 4)\ntest_not_ok(test_gt, 4, 4)\ntest_ok(test_ge, 4, 4)\ntest_not_ok(test_ge, 3, 4)\n\ntest_ok(test_lt, 3, 4)\ntest_not_ok(test_lt, 4, 4)\ntest_ok(test_le, 4, 4)\ntest_not_ok(test_le, 5, 4)\n\n\nt = torch.rand(100)\ntest_eq(t, t)\ntest_eq_nan(t, t)\n\n\nsource\n\n\nstack_pad\n\n stack_pad (o, padding_value=nan)\n\nConverts a an iterable into a numpy array using padding if necessary\n\nsource\n\n\nstack\n\n stack (o, axis=0, retain=True)\n\n\no = [[0,1,2], [4,5,6,7]]\ntest_eq(stack_pad(o).shape, (1, 2, 4))\ntest_eq(type(stack_pad(o)), np.ndarray)\ntest_eq(np.isnan(stack_pad(o)).sum(), 1)\n\n\no = 3\nprint(stack_pad(o))\ntest_eq(stack_pad(o), np.array([[3.]]))\no = [4,5]\nprint(stack_pad(o))\ntest_eq(stack_pad(o), np.array([[4., 5.]]))\no = [[0,1,2], [4,5,6,7]]\nprint(stack_pad(o))\no = np.array([0, [1,2]], dtype=object)\nprint(stack_pad(o))\no = np.array([[[0], [10, 20], [100, 200, 300]], [[0, 1, 2, 3], [10, 20], [100]]], dtype=object)\nprint(stack_pad(o))\no = np.array([0, [10, 20]], 
dtype=object)\nprint(stack_pad(o))\n\n[[3.]]\n[[4. 5.]]\n[[[ 0. 1. 2. nan]\n [ 4. 5. 6. 7.]]]\n[[ 0. nan]\n [ 1. 2.]]\n[[[ 0. nan nan nan]\n [ 10. 20. nan nan]\n [100. 200. 300. nan]]\n\n [[ 0. 1. 2. 3.]\n [ 10. 20. nan nan]\n [100. nan nan nan]]]\n[[ 0. nan]\n [10. 20.]]\n\n\n\na = np.random.rand(2, 3, 4)\nt = torch.from_numpy(a)\ntest_eq_type(stack(itemify(a, tup_id=0)), a)\ntest_eq_type(stack(itemify(t, tup_id=0)), t)\n\n\nsource\n\n\npad_sequences\n\n pad_sequences (o, maxlen:int=None,\n dtype:(<class'str'>,<class'type'>)=<class\n 'numpy.float64'>, padding:str='pre', truncating:str='pre',\n padding_value:float=nan)\n\nTransforms an iterable with sequences into a 3d numpy array using padding or truncating sequences if necessary\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\no\n\n\nIterable object\n\n\nmaxlen\nint\nNone\nOptional max length of the output. If None, max length of the longest individual sequence.\n\n\ndtype\n(<class ‘str’>, <class ‘type’>)\nfloat64\nType of the output sequences. To pad sequences with variable length strings, you can use object.\n\n\npadding\nstr\npre\n‘pre’ or ‘post’ pad either before or after each sequence.\n\n\ntruncating\nstr\npre\n‘pre’ or ‘post’ remove values from sequences larger than maxlen, either at the beginning or at the end of the sequences.\n\n\npadding_value\nfloat\nnan\nValue used for padding.\n\n\n\nThis function transforms a list (of length n_samples) of sequences into a 3d numpy array of shape:\n [n_samples x n_vars x seq_len]\nseq_len is either the maxlen argument if provided, or the length of the longest sequence in the list.\nSequences that are shorter than seq_len are padded with value until they are seq_len long.\nSequences longer than seq_len are truncated so that they fit the desired length.\nThe position where padding or truncation happens is determined by the arguments padding and truncating, respectively. 
Pre-padding or removing values from the beginning of the sequence is the default.\nInput sequences to pad_sequences may be have 1, 2 or 3 dimensions:\n\n# 1 dim\na1 = np.arange(6)\na2 = np.arange(3) * 10\na3 = np.arange(2) * 100\no = [a1, a2, a3]\npadded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='post', truncating='pre', padding_value=np.nan)\ntest_eq(padded_o.shape, (3, 1, 4))\npadded_o\n\narray([[[ 2., 3., 4., 5.]],\n\n [[ 0., 10., 20., nan]],\n\n [[ 0., 100., nan, nan]]])\n\n\n\n# 2 dim\na1 = np.arange(12).reshape(2, 6)\na2 = np.arange(6).reshape(2, 3) * 10\na3 = np.arange(4).reshape(2, 2) * 100\no = [a1, a2, a3]\npadded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='post', truncating='pre', padding_value=np.nan)\ntest_eq(padded_o.shape, (3, 2, 4))\npadded_o\n\narray([[[ 2., 3., 4., 5.],\n [ 8., 9., 10., 11.]],\n\n [[ 0., 10., 20., nan],\n [ 30., 40., 50., nan]],\n\n [[ 0., 100., nan, nan],\n [200., 300., nan, nan]]])\n\n\n\n# 3 dim\na1 = np.arange(10).reshape(1, 2, 5)\na2 = np.arange(6).reshape(1, 2, 3) * 10\na3 = np.arange(4).reshape(1, 2, 2) * 100\no = [a1, a2, a3]\npadded_o = pad_sequences(o, maxlen=None, dtype=np.float64, padding='pre', truncating='pre', padding_value=np.nan)\ntest_eq(padded_o.shape, (3, 2, 5))\npadded_o\n\narray([[[ 0., 1., 2., 3., 4.],\n [ 5., 6., 7., 8., 9.]],\n\n [[ nan, nan, 0., 10., 20.],\n [ nan, nan, 30., 40., 50.]],\n\n [[ nan, nan, nan, 0., 100.],\n [ nan, nan, nan, 200., 300.]]])\n\n\n\n# 3 dim\na1 = np.arange(10).reshape(1, 2, 5)\na2 = np.arange(6).reshape(1, 2, 3) * 10\na3 = np.arange(4).reshape(1, 2, 2) * 100\no = [a1, a2, a3]\npadded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='pre', truncating='pre', padding_value=np.nan)\ntest_eq(padded_o.shape, (3, 2, 4))\npadded_o\n\narray([[[ 1., 2., 3., 4.],\n [ 6., 7., 8., 9.]],\n\n [[ nan, 0., 10., 20.],\n [ nan, 30., 40., 50.]],\n\n [[ nan, nan, 0., 100.],\n [ nan, nan, 200., 300.]]])\n\n\n\n# 3 dim\na1 = np.arange(10).reshape(1, 2, 5)\na2 = np.arange(6).reshape(1, 2, 3) * 10\na3 = np.arange(4).reshape(1, 2, 2) * 100\no = [a1, a2, a3]\npadded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='post', truncating='pre', padding_value=np.nan)\ntest_eq(padded_o.shape, (3, 2, 4))\npadded_o\n\narray([[[ 1., 2., 3., 4.],\n [ 6., 7., 8., 9.]],\n\n [[ 0., 10., 20., nan],\n [ 30., 40., 50., nan]],\n\n [[ 0., 100., nan, nan],\n [200., 300., nan, nan]]])\n\n\n\n# 3 dim\na1 = np.arange(10).reshape(1, 2, 5)\na2 = np.arange(6).reshape(1, 2, 3) * 10\na3 = np.arange(4).reshape(1, 2, 2) * 100\no = [a1, a2, a3]\npadded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='post', truncating='post', padding_value=np.nan)\ntest_eq(padded_o.shape, (3, 2, 4))\npadded_o\n\narray([[[ 0., 1., 2., 3.],\n [ 5., 6., 7., 8.]],\n\n [[ 0., 10., 20., nan],\n [ 30., 40., 50., nan]],\n\n [[ 0., 100., nan, nan],\n [200., 300., nan, nan]]])\n\n\n\n# iterable is a list of lists\na1 = np.arange(12).reshape(1, 2, 6).tolist()\na2 = (np.arange(6).reshape(1, 2, 3) * 10).tolist()\na3 = (np.arange(4).reshape(1, 2, 2) * 100).tolist()\no = [a1, a2, a3]\npadded_o = pad_sequences(o, maxlen=None, dtype=np.float64, padding='post', truncating='pre', padding_value=np.nan)\ntest_eq(padded_o.shape, (3, 2, 6))\npadded_o\n\narray([[[ 0., 1., 2., 3., 4., 5.],\n [ 6., 7., 8., 9., 10., 11.]],\n\n [[ 0., 10., 20., nan, nan, nan],\n [ 30., 40., 50., nan, nan, nan]],\n\n [[ 0., 100., nan, nan, nan, nan],\n [200., 300., nan, nan, nan, nan]]])\n\n\n\nsource\n\n\nmatch_seq_len\n\n match_seq_len (*arrays)\n\n\na = 
np.random.rand(10, 5, 8)\nb = np.random.rand(3, 5, 10)\nc, d = match_seq_len(a, b)\ntest_eq(c.shape[-1], d.shape[-1])\n\n\nsource\n\n\nrandom_shuffle\n\n random_shuffle (o, random_state=None)\n\n\na = np.arange(10)\ntest_eq_type(random_shuffle(a, 1), np.array([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))\nt = torch.arange(10)\ntest_eq_type(random_shuffle(t, 1), tensor([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))\nl = list(a)\ntest_eq(random_shuffle(l, 1), [2, 9, 6, 4, 0, 3, 1, 7, 8, 5])\nl2 = L(l)\ntest_eq_type(random_shuffle(l2, 1), L([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))\n\n\nsource\n\n\ncat2int\n\n cat2int (o)\n\n\na = np.array(['b', 'a', 'a', 'b', 'a', 'b', 'a'])\ntest_eq_type(cat2int(a), TensorCategory([1, 0, 0, 1, 0, 1, 0]))\n\n\nTensorBase([1,2,3])\n\nTensorBase([1, 2, 3])\n\n\n\nsource\n\n\ncycle_dl_estimate\n\n cycle_dl_estimate (dl, iters=10)\n\n\nsource\n\n\ncycle_dl_to_device\n\n cycle_dl_to_device (dl, show_progress_bar=True)\n\n\nsource\n\n\ncycle_dl\n\n cycle_dl (dl, show_progress_bar=True)\n\n\nsource\n\n\ncache_data\n\n cache_data (o, slice_len=10000, verbose=False)\n\n\nsource\n\n\nget_func_defaults\n\n get_func_defaults (f)\n\n\nsource\n\n\nget_idx_from_df_col_vals\n\n get_idx_from_df_col_vals (df, col, val_list)\n\n\nsource\n\n\nget_sublist_idxs\n\n get_sublist_idxs (aList, bList)\n\nGet idxs that when applied to aList will return bList. aList must contain all values in bList\n\nx = np.array([3, 5, 7, 1, 9, 8, 6, 2])\ny = np.array([6, 1, 5, 7])\nidx = get_sublist_idxs(x, y)\ntest_eq(x[idx], y)\nx = np.array([3, 5, 7, 1, 9, 8, 6, 6, 2])\ny = np.array([6, 1, 5, 7, 5])\nidx = get_sublist_idxs(x, y)\ntest_eq(x[idx], y)\n\n\nsource\n\n\nflatten_list\n\n flatten_list (l)\n\n\nsource\n\n\ndisplay_pd_df\n\n display_pd_df (df, max_rows:Union[bool,int]=False,\n max_columns:Union[bool,int]=False)\n\n\nold_max_rows, old_max_columns = pd.get_option('display.max_rows'), pd.get_option('display.max_columns')\ndf = pd.DataFrame(np.random.rand(70, 25))\ndisplay_pd_df(df, max_rows=2, max_columns=3)\ntest_eq(old_max_rows, pd.get_option('display.max_rows'))\ntest_eq(old_max_columns, pd.get_option('display.max_columns'))\n\n\n\n\n\n\n\n\n0\n...\n24\n\n\n\n\n0\n0.436034\n...\n0.231616\n\n\n...\n...\n...\n...\n\n\n69\n0.633051\n...\n0.051762\n\n\n\n\n70 rows × 25 columns\n\n\n\n\nsource\n\n\ntscore\n\n tscore (o)\n\n\nsource\n\n\nkstest\n\n kstest (data1, data2, alternative='two-sided', mode='auto', by_axis=None)\n\nPerforms the two-sample Kolmogorov-Smirnov test for goodness of fit.\nParameters data1, data2: Two arrays of sample observations assumed to be drawn from a continuous distributions. Sample sizes can be different. alternative: {‘two-sided’, ‘less’, ‘greater’}, optional. Defines the null and alternative hypotheses. Default is ‘two-sided’. mode: {‘auto’, ‘exact’, ‘asymp’}, optional. Defines the method used for calculating the p-value. 
by_axis (optional, int): for arrays with more than 1 dimension, the test will be run for each variable in that axis if by_axis is not None.\n\nsource\n\n\nttest\n\n ttest (data1, data2, equal_var=False)\n\nCalculates t-statistic and p-value based on 2 sample distributions\n\na = np.random.normal(0.5, 1, 100)\nb = np.random.normal(0.15, .5, 50)\nplt.hist(a, 50)\nplt.hist(b, 50)\nplt.show()\nttest(a,b)\n\n\n\n\n\n\n\n\n\na = np.random.normal(0.5, 1, (100,3))\nb = np.random.normal(0.5, 1, (50,))\nkstest(a,b)\n\n(0.22333333333333333, 0.02452803315700394)\n\n\n\na = np.random.normal(0.5, 1, (100,3))\nb = np.random.normal(0.15, .5, (50,))\nkstest(a,b)\n\n(0.31, 0.0004061333917852463)\n\n\n\ndata1 = np.random.normal(0,1,(100, 5, 3))\ndata2 = np.random.normal(0,2,(100, 5, 3))\nkstest(data1, data2, by_axis=1)\n\n([0.22,\n 0.16333333333333333,\n 0.16333333333333333,\n 0.18666666666666668,\n 0.21666666666666667],\n [8.994053173844458e-07,\n 0.0006538374533623971,\n 0.0006538374533623971,\n 5.522790313356146e-05,\n 1.4007759411179028e-06])\n\n\n\na = np.random.normal(0.5, 1, 100)\nt = torch.normal(0.5, 1, (100, ))\ntscore(a), tscore(t)\n\n(4.33309224863388, tensor(5.7798))\n\n\n\nsource\n\n\nscc\n\n scc (a, b)\n\n\nsource\n\n\npcc\n\n pcc (a, b)\n\n\nsource\n\n\nremove_fn\n\n remove_fn (fn, verbose=False)\n\nRemoves a file (fn) if exists\n\nsource\n\n\nnpsave\n\n npsave (array_fn, array, verbose=True)\n\n\nfn = 'data/remove_fn_test.npy'\na = np.zeros(1)\nnpsave(fn, a)\ndel a\nnp.load(fn, mmap_mode='r+')\nremove_fn(fn, True)\nremove_fn(fn, True)\n\ndata/remove_fn_test.npy does not exist\nsaving data/remove_fn_test.npy...\n...data/remove_fn_test.npy saved\ndata/remove_fn_test.npy file removed\ndata/remove_fn_test.npy does not exist\n\n\n\nsource\n\n\npermute_2D\n\n permute_2D (array, axis=None)\n\nPermute rows or columns in an array. 
This can be used, for example, in feature permutation\n\ns = np.arange(100 * 50).reshape(100, 50)\ntest_eq(permute_2D(s, axis=0).mean(0), s.mean(0))\ntest_ne(permute_2D(s, axis=0), s)\ntest_eq(permute_2D(s, axis=1).mean(1), s.mean(1))\ntest_ne(permute_2D(s, axis=1), s)\ntest_ne(permute_2D(s), s)\n\n\nsource\n\n\nrandom_half_normal_tensor\n\n random_half_normal_tensor (shape=1, device=None)\n\nReturns a tensor of a predefined shape between 0 and 1 with a half-normal distribution\n\nsource\n\n\nrandom_normal_tensor\n\n random_normal_tensor (shape=1, device=None)\n\nReturns a tensor of a predefined shape between -1 and 1 with a normal distribution\n\nsource\n\n\nrandom_half_normal\n\n random_half_normal ()\n\nReturns a number between 0 and 1 with a half-normal distribution\n\nsource\n\n\nrandom_normal\n\n random_normal ()\n\nReturns a number between -1 and 1 with a normal distribution\n\nsource\n\n\nfig2buf\n\n fig2buf (fig)\n\n\nsource\n\n\nget_plot_fig\n\n get_plot_fig (size=None, dpi=100)\n\n\nsource\n\n\ndefault_dpi\n\n default_dpi ()\n\n\ndefault_dpi()\n\n100\n\n\n\nsource\n\n\nplot_scatter\n\n plot_scatter (x, y, deg=1)\n\n\na = np.random.rand(100)\nb = np.random.rand(100)**2\nplot_scatter(a, b)\n\n\n\n\n\n\n\n\n\nsource\n\n\nget_idxs\n\n get_idxs (o, aList)\n\n\na = random_shuffle(np.arange(100, 200))\nb = np.random.choice(a, 10, False)\nidxs = get_idxs(a, b)\ntest_eq(a[idxs], b)\n\n\nsource\n\n\napply_cmap\n\n apply_cmap (o, cmap)\n\n\na = np.random.rand(16, 1, 40, 50)\ns = L(a.shape)\ns[1] = 3\ntest_eq(L(apply_cmap(a, 'viridis').shape), s)\n\ns[0] = 1\na = np.random.rand(1, 40, 50)\ntest_eq(L(apply_cmap(a, 'viridis').shape), s)\n\n\nsource\n\n\ntorch_tile\n\n torch_tile (a, n_tile, dim=0)\n\n\ntest_eq(torch_tile(torch.arange(2), 3), tensor([0, 1, 0, 1, 0, 1]))\n\n\nsource\n\n\nto_tsfresh_df\n\n to_tsfresh_df (ts)\n\nPrepares a time series (Tensor/np.ndarray) to be used as a tsfresh dataset to allow feature extraction\n\nts = torch.rand(16, 3, 20)\na = to_tsfresh_df(ts)\nts = ts.numpy()\nb = to_tsfresh_df(ts)\n\n\nsource\n\n\nscorr\n\n scorr (a, b)\n\n\nsource\n\n\npcorr\n\n pcorr (a, b)\n\n\nsource\n\n\ntorch_diff\n\n torch_diff (t, lag=1, pad=True, append=0)\n\n\nt = torch.arange(24).reshape(2,3,4)\ntest_eq(torch_diff(t, 1)[..., 1:].float().mean(), 1.)\ntest_eq(torch_diff(t, 2)[..., 2:].float().mean(), 2.)\n\n\nsource\n\n\ntorch_clamp\n\n torch_clamp (o, min=None, max=None)\n\nClamp torch.Tensor using 1 or multiple dimensions\n\nsource\n\n\nget_percentile\n\n get_percentile (o, percentile, axis=None)\n\n\nsource\n\n\nclip_outliers\n\n clip_outliers (o, axis=None)\n\n\nsource\n\n\nget_outliers_IQR\n\n get_outliers_IQR (o, axis=None, quantile_range=(25.0, 75.0))\n\n\nt = torch.randn(2,3,100)\ntest_eq(type(get_outliers_IQR(t, -1)[0]), torch.Tensor)\na = t.numpy()\ntest_eq(type(get_outliers_IQR(a, -1)[0]), np.ndarray)\ntest_close(get_percentile(t, 25).numpy(), get_percentile(a, 25))\n\n\nsource\n\n\nget_robustscale_params\n\n get_robustscale_params (o, sel_vars=None, not_sel_vars=None, by_var=True,\n percentiles=(25, 75), eps=1e-06)\n\nCalculates median and inter-quartile range required to robust scale inputs\n\na = np.random.rand(16, 3, 100)\na[a>.8] = np.nan\nmedian, IQR = get_robustscale_params(a, by_var=True, percentiles=(25, 75))\na_scaled = (a - median) / IQR\ntest_eq(a.shape, a_scaled.shape)\ntest_eq(np.isnan(median).sum(),0)\ntest_eq(np.isnan(IQR).sum(),0)\ntest_eq(np.isnan(a), np.isnan(a_scaled))\n\n\nsource\n\n\ntorch_slice_by_dim\n\n torch_slice_by_dim (t, index, dim=-1, 
**kwargs)\n\n\nt = torch.rand(5, 3)\nindex = torch.randint(0, 3, (5, 1))\n# index = [[0, 2], [0, 1], [1, 2], [0, 2], [0, 1]]\ntorch_slice_by_dim(t, index)\n\ntensor([[0.5341],\n [0.4543],\n [0.0942],\n [0.9645],\n [0.0405]])\n\n\n\nsource\n\n\ntorch_nanstd\n\n torch_nanstd (o, dim=None, keepdim=False)\n\nThere’s currently no torch.nanstd function\n\nsource\n\n\ntorch_nanmean\n\n torch_nanmean (o, dim=None, keepdim=False)\n\nThere’s currently no torch.nanmean function\n\nt = torch.rand(1000)\nt[:100] = float('nan')\nassert torch_nanmean(t).item() > 0\n\n\nsource\n\n\nconcat\n\n concat (*ls, dim=0)\n\nConcatenate tensors, arrays, lists, or tuples by a dimension\n\nsource\n\n\nreduce_memory_usage\n\n reduce_memory_usage (df)\n\n\nsource\n\n\ncls_name\n\n cls_name (o)\n\n\ntest_eq(cls_name(timer), 'Timer')\n\n\nsource\n\n\nrotate_axis2\n\n rotate_axis2 (o, steps=1)\n\n\nsource\n\n\nrotate_axis1\n\n rotate_axis1 (o, steps=1)\n\n\nsource\n\n\nrotate_axis0\n\n rotate_axis0 (o, steps=1)\n\n\nsource\n\n\nrandom_roll3d\n\n random_roll3d (o, axis=(), replace=False)\n\nRandomly rolls a 3D object along the indicated axes This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently\n\nsource\n\n\nrandom_roll2d\n\n random_roll2d (o, axis=(), replace=False)\n\nRolls a 2D object on the indicated axis This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently\n\nsource\n\n\nroll3d\n\n roll3d (o, roll1:Union[NoneType,list,int]=None,\n roll2:Union[NoneType,list,int]=None,\n roll3:Union[NoneType,list,int]=None)\n\nRolls a 3D object on the indicated axis This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently\n\nsource\n\n\nroll2d\n\n roll2d (o, roll1:Union[NoneType,list,int]=None,\n roll2:Union[NoneType,list,int]=None)\n\nRolls a 2D object on the indicated axis This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently\n\na = np.tile(np.arange(10), 3).reshape(3, 10) * np.array([1, 10, 100]).reshape(-1, 1)\na\n\narray([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90],\n [ 0, 100, 200, 300, 400, 500, 600, 700, 800, 900]])\n\n\n\nroll2d(a, roll1=[2, 1, 0])\n\narray([[ 0, 100, 200, 300, 400, 500, 600, 700, 800, 900],\n [ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90],\n [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]])\n\n\n\nroll2d(a, roll2=3)\n\narray([[ 7, 8, 9, 0, 1, 2, 3, 4, 5, 6],\n [ 70, 80, 90, 0, 10, 20, 30, 40, 50, 60],\n [700, 800, 900, 0, 100, 200, 300, 400, 500, 600]])\n\n\n\no = torch.arange(24).reshape(2,3,4)\ntest_eq(rotate_axis0(o)[1], o[0])\ntest_eq(rotate_axis1(o)[:,1], o[:,0])\ntest_eq(rotate_axis2(o)[...,1], o[...,0])\n\n\nsource\n\n\nchunks_calculator\n\n chunks_calculator (shape, dtype='float32', n_bytes=1073741824)\n\nFunction to calculate chunks for a given size of n_bytes (default = 1024**3 == 1GB). 
It guarantees > 50% of the chunk will be filled\n\nshape = (1_000, 10, 1000)\ndtype = 'float32'\ntest_eq(chunks_calculator(shape, dtype), False)\n\nshape = (54684, 10, 1000)\ndtype = 'float32'\ntest_eq(chunks_calculator(shape, dtype), (27342, -1, -1))\n\n\nsource\n\n\nis_memory_shared\n\n is_memory_shared (a, b)\n\nCheck if 2 array-like objects share memory\n\na = np.random.rand(2,3,4)\nt1 = torch.from_numpy(a)\ntest_eq(is_memory_shared(a, t1), True)\na = np.random.rand(2,3,4)\nt2 = torch.as_tensor(a)\ntest_eq(is_memory_shared(a, t2), True)\na = np.random.rand(2,3,4)\nt3 = torch.tensor(a)\ntest_eq(is_memory_shared(a, t3), False)\n\n\nsource\n\n\nassign_in_chunks\n\n assign_in_chunks (a, b, chunksize='auto', inplace=True, verbose=True)\n\nAssigns values in b to an array-like object a using chunks to avoid memory overload. The resulting a retains its dtype and shares its memory. a: array-like object b: may be an integer, float, str, ‘rand’ (for random data), or another array-like object. chunksize: is the size of chunks. If ‘auto’, chunks will have around 1GB each.\n\na = np.random.rand(10,3,4).astype('float32')\na_dtype = a.dtype\na_id = id(a)\nb = np.random.rand(10,3,4).astype('float64')\nassign_in_chunks(a, b, chunksize=2, inplace=True, verbose=True)\ntest_close(a, b)\ntest_eq(a.dtype, a_dtype)\ntest_eq(id(a), a_id)\n\na = np.random.rand(10,3,4).astype('float32')\na_dtype = a.dtype\na_id = id(a)\nb = 1\nassign_in_chunks(a, b, chunksize=2, inplace=True, verbose=True)\ntest_eq(a, np.ones_like(a).astype(a.dtype))\ntest_eq(a.dtype, a_dtype)\ntest_eq(id(a), a_id)\n\na = np.random.rand(10,3,4).astype('float32')\na_dtype = a.dtype\na_id = id(a)\nb = 0.5\nassign_in_chunks(a, b, chunksize=2, inplace=True, verbose=True)\ntest_eq(a.dtype, a_dtype)\ntest_eq(id(a), a_id)\n\na = np.random.rand(10,3,4).astype('float32')\na_dtype = a.dtype\na_id = id(a)\nb = 'rand'\nassign_in_chunks(a, b, chunksize=2, inplace=True, verbose=True)\ntest_eq(a.dtype, a_dtype)\ntest_eq(id(a), a_id)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\na = np.random.rand(10,3,4).astype('float32')\nb = np.random.rand(10,3,4).astype('float64')\nc = assign_in_chunks(a, b, chunksize=2, inplace=False, verbose=True)\ntest_close(c, b)\ntest_eq(a.dtype, c.dtype)\ntest_eq(is_memory_shared(a, c), True)\n\na = np.random.rand(10,3,4).astype('float32')\nb = 1\nc = assign_in_chunks(a, b, chunksize=2, inplace=False, verbose=True)\ntest_eq(a, np.ones_like(a).astype(a.dtype))\ntest_eq(a.dtype, c.dtype)\ntest_eq(is_memory_shared(a, c), True)\n\na = np.random.rand(10,3,4).astype('float32')\nb = 0.5\nc = assign_in_chunks(a, b, chunksize=2, inplace=False, verbose=True)\ntest_eq(a.dtype, c.dtype)\ntest_eq(is_memory_shared(a, c), True)\n\na = np.random.rand(10,3,4).astype('float32')\nb = 'rand'\nc = assign_in_chunks(a, b, chunksize=2, inplace=False, verbose=True)\ntest_eq(a.dtype, c.dtype)\ntest_eq(is_memory_shared(a, c), True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsource\n\n\ncreate_array\n\n create_array (shape, fname=None, path='./data', on_disk=True,\n dtype='float32', mode='r+', fill_value='rand',\n chunksize='auto', verbose=True, **kwargs)\n\nmode: ‘r’: Open existing file for reading only. ‘r+’: Open existing file for reading and writing. ‘w+’: Create or overwrite existing file for reading and writing. ‘c’: Copy-on-write: assignments affect data in memory, but changes are not saved to disk. The file on disk is read-only. 
fill_value: ‘rand’ (for random numbers), int or float chunksize = ‘auto’ to calculate chunks of 1GB, or any integer (for a given number of samples)\n\nfname = 'X_on_disk'\nshape = (100, 10, 10)\nX = create_array(shape, fname, on_disk=True, mode='r+')\ntest_ne(abs(X).sum(), 0)\nos.remove(X.filename)\ndel X\n\nauto chunksize: 100\n\n\n\n\n\n\n\n\n\n\nfname = 'X_on_disk'\nshape = (100, 10, 10)\nX = create_empty_array(shape, fname, on_disk=True, mode='r+')\ntest_eq(abs(X).sum(), 0)\n\nchunksize = 10\npbar = progress_bar(range(math.ceil(len(X) / chunksize)), leave=False)\nstart = 0\nfor i in pbar:\n end = min(start + chunksize, len(X))\n partial_data = np.random.rand(end - start, X.shape[1] , X.shape[2])\n X[start:end] = partial_data\n start = end\n del partial_data\n gc.collect()\nfilename = X.filename\ndel X\nX = np.load(filename, mmap_mode='r+')\ntest_eq((X == 0).sum(), 0)\ntest_eq(X.shape, shape)\nos.remove(X.filename)\ndel X\n\n\n\n\n\n\n\n\n\nsource\n\n\nnp_load_compressed\n\n np_load_compressed (fname=None, path='./data', **kwargs)\n\n\nsource\n\n\nnp_save_compressed\n\n np_save_compressed (arr, fname=None, path='./data', verbose=False,\n **kwargs)\n\n\nX1 = np.random.rand(10)\nnp_save_compressed(X1, 'X_comp', path='./data')\nX2 = np_load_compressed('X_comp')\ntest_eq(X1, X2)\n\n\nsource\n\n\nnp2memmap\n\n np2memmap (arr, fname=None, path='./data', dtype='float32', mode='c',\n **kwargs)\n\nFunction that turns an ndarray into a memmap ndarray mode: ‘r’: Open existing file for reading only. ‘r+’: Open existing file for reading and writing. ‘w+’: Create or overwrite existing file for reading and writing. ‘c’: Copy-on-write: assignments affect data in memory, but changes are not saved to disk. The file on disk is read-only.\n\nX1 = np.random.rand(10)\nX2 = np2memmap(X1, 'X1_test')\ntest_eq(X1, X2)\ntest_ne(type(X1), type(X2))\n\n\nsource\n\n\ntorch_mean_groupby\n\n torch_mean_groupby (o, idxs)\n\nComputes torch mean along axis 0 grouped by the idxs. 
Need to ensure that idxs have the same order as o\n\no = torch.arange(6*2*3).reshape(6, 2, 3).float()\nidxs = np.array([[0,1,2,3], [2,3]], dtype=object)\noutput = torch_mean_groupby(o, idxs)\ntest_eq(o[:2], output[:2])\ntest_eq(o[2:4].mean(0), output[2])\ntest_eq(o[4:6].mean(0), output[3])\n\n\nsource\n\n\ntorch_flip\n\n torch_flip (t, dims=-1)\n\n\nt = torch.randn(2, 3, 4)\ntest_eq(torch.flip(t, (2,)), torch_flip(t, dims=-1))\n\n\nsource\n\n\ntorch_masked_to_num\n\n torch_masked_to_num (o, mask, num=0, inplace=False)\n\n\nsource\n\n\ntorch_nan_to_num\n\n torch_nan_to_num (o, num=0, inplace=False)\n\n\nx = torch.rand(2, 4, 6)\nx[:, :3][x[:, :3] < .5] = np.nan\nnan_values = torch.isnan(x).sum()\ny = torch_nan_to_num(x[:, :3], inplace=False)\ntest_eq(torch.isnan(y).sum(), 0)\ntest_eq(torch.isnan(x).sum(), nan_values)\ntorch_nan_to_num(x[:, :3], inplace=True)\ntest_eq(torch.isnan(x).sum(), 0)\n\n\nx = torch.rand(2, 4, 6)\nmask = x[:, :3] > .5\nx[:, :3] = torch_masked_to_num(x[:, :3], mask, num=0, inplace=False)\ntest_eq(x[:, :3][mask].sum(), 0)\n\n\nx = torch.rand(2, 4, 6)\nmask = x[:, :3] > .5\ntorch_masked_to_num(x[:, :3], mask, num=0, inplace=True)\ntest_eq(x[:, :3][mask].sum(), 0)\n\n\nsource\n\n\nmpl_trend\n\n mpl_trend (x, y, deg=1)\n\n\nx = np.sort(np.random.randint(0, 100, 100)/10)\ny = np.random.rand(100) + np.linspace(0, 10, 100)\ntrend = mpl_trend(x, y)\nplt.scatter(x, y)\nplt.plot(x, trend, 'r')\nplt.show()\n\n\n\n\n\n\n\n\n\nsource\n\n\narray2digits\n\n array2digits (o, n_digits=None, normalize=True)\n\n\nsource\n\n\nint2digits\n\n int2digits (o, n_digits=None, normalize=True)\n\n\no = -9645\ntest_eq(int2digits(o, 6), np.array([ 0, 0, -.9, -.6, -.4, -.5]))\n\na = np.random.randint(-1000, 1000, 10)\ntest_eq(array2digits(a,5).shape, (10,5))\n\n\nsource\n\n\nsincos_encoding\n\n sincos_encoding (seq_len, device=None, to_np=False)\n\n\nsin, cos = sincos_encoding(100)\nplt.plot(sin.cpu().numpy())\nplt.plot(cos.cpu().numpy())\nplt.show()\n\n\n\n\n\n\n\n\n\nsource\n\n\nlinear_encoding\n\n linear_encoding (seq_len, device=None, to_np=False, lin_range=(-1, 1))\n\n\nlin = linear_encoding(100)\nplt.plot(lin.cpu().numpy())\nplt.show()\n\n\n\n\n\n\n\n\n\nsource\n\n\nencode_positions\n\n encode_positions (pos_arr, min_val=None, max_val=None, linear=False,\n lin_range=(-1, 1))\n\nEncodes an array with positions using a linear or sincos methods\n\nn_samples = 10\nlength = 500\n_a = []\nfor i in range(n_samples):\n a = np.arange(-4000, 4000, 10)\n mask = np.random.rand(len(a)) > .5\n a = a[mask]\n a = np.concatenate([a, np.array([np.nan] * (length - len(a)))])\n _a.append(a.reshape(-1,1))\na = np.concatenate(_a, -1).transpose(1,0)\nsin, cos = encode_positions(a, linear=False)\ntest_eq(a.shape, (n_samples, length))\ntest_eq(sin.shape, (n_samples, length))\ntest_eq(cos.shape, (n_samples, length))\nplt.plot(sin.T)\nplt.plot(cos.T)\nplt.xlim(0, 500)\nplt.show()\n\n\n\n\n\n\n\n\n\nn_samples = 10\nlength = 500\n_a = []\nfor i in range(n_samples):\n a = np.arange(-4000, 4000, 10)\n mask = np.random.rand(len(a)) > .5\n a = a[mask]\n a = np.concatenate([a, np.array([np.nan] * (length - len(a)))])\n _a.append(a.reshape(-1,1))\na = np.concatenate(_a, -1).transpose(1,0)\nlin = encode_positions(a, linear=True)\ntest_eq(a.shape, (n_samples, length))\ntest_eq(lin.shape, (n_samples, length))\nplt.plot(lin.T)\nplt.xlim(0, 500)\nplt.show()\n\n\n\n\n\n\n\n\n\nsource\n\n\nsort_generator\n\n sort_generator (generator, bs)\n\n\ngenerator = (i for i in np.random.permutation(np.arange(1000000)).tolist())\nl = 
list(sort_generator(generator, 512))\ntest_eq(l[:512], sorted(l[:512]))\n\n\nsource\n\n\nget_subset_dict\n\n get_subset_dict (d, keys)\n\n\nkeys = string.ascii_lowercase\nvalues = np.arange(len(keys))\nd = {k:v for k,v in zip(keys,values)}\ntest_eq(get_subset_dict(d, ['a', 'k', 'j', 'e']), {'a': 0, 'k': 10, 'j': 9, 'e': 4})\n\n\nsource\n\n\nremove_dir\n\n remove_dir (directory, verbose=True)\n\n\nsource\n\n\ncreate_dir\n\n create_dir (directory, verbose=True)\n\n\npath = \"wandb3/wandb2/wandb\"\ncreate_dir(path)\nassert Path(path).exists()\n\npaths = [\"wandb3/wandb2/wandb\", \"wandb3/wandb2\", \"wandb\"]\nremove_dir(paths)\nfor p in paths:\n assert not Path(p).exists()\n\npath = \"wandb3\"\nassert Path(path).exists()\nremove_dir(path)\nassert not Path(path).exists()\n\nwandb3/wandb2/wandb directory created.\nwandb3/wandb2/wandb directory removed.\nwandb3/wandb2 directory removed.\nwandb directory doesn't exist.\nwandb3 directory removed.\n\n\n\ncreate_dir('./test')\n\ntest directory created.\n\n\n\na = 5\ndef fn(b): return a + b\n\nWriting ./test/mod_dev.py\n\n\n\nfname = \"./test/mod_dev.py\"\nwhile True:\n if fname[0] in \"/ .\": fname = fname.split(fname[0], 1)[1]\n else: break\nif '/' in fname and fname.rsplit('/', 1)[0] not in sys.path: sys.path.append(fname.rsplit('/', 1)[0])\nmod = import_file_as_module(fname)\ntest_eq(mod.fn(3), 8)\nsys.path = sys.path[:-1]\nremove_dir('./test/')\n\ntest directory removed.\n\n\n\nsource\n\n\nnamed_partial\n\n named_partial (name, func, *args, **kwargs)\n\nCreate a partial function with a name\n\ndef add_1(x, add=1): return x+add\ntest_eq(add_1(1), 2)\nadd_2 = partial(add_1, add=2)\ntest_eq(add_2(2), 4)\ntest_ne(str(add_2), \"add_2\")\nadd_2 = named_partial('add_2', add_1, add=2)\ntest_eq(add_2(2), 4)\ntest_eq(str(add_2), \"add_2\")\n\nclass _A():\n def __init__(self, add=1): self.add = add\n def __call__(self, x): return x + self.add\n\ntest_eq(_A()(1), 2)\n_A2 = partial(_A, add=2)\ntest_eq(_A2()(1), 3)\ntest_ne(str(_A2), '_A2')\n_A2 = named_partial('_A2', _A, add=2)\ntest_eq(_A2()(1), 3)\ntest_eq(str(_A2), '_A2')\n\n\nsource\n\n\ndict2attrdict\n\n dict2attrdict (d:dict)\n\nConverts a (nested) dict to an AttrDict.\n\n\n\n\nType\nDetails\n\n\n\n\nd\ndict\na dict\n\n\n\n\nsource\n\n\nattrdict2dict\n\n attrdict2dict (d:dict)\n\nConverts a (nested) AttrDict dict to a dict.\n\n\n\n\nType\nDetails\n\n\n\n\nd\ndict\na dict\n\n\n\n\n# Test attrdict2dict\nd = AttrDict({'a': 1, 'b': AttrDict({'c': 2, 'd': 3})})\ntest_eq(attrdict2dict(d), {'a': 1, 'b': {'c': 2, 'd': 3}})\n# Test dict2attrdict\nd = {'a': 1, 'b': {'c': 2, 'd': 3}}\ntest_eq(dict2attrdict(d), AttrDict({'a': 1, 'b': AttrDict({'c': 2, 'd': 3})}))\n\n\nsource\n\n\nget_config\n\n get_config (file_path)\n\nGets a config from a yaml file.\n\nsource\n\n\nyaml2dict\n\n yaml2dict (file_path, attrdict=True)\n\nConverts a yaml file to a dict (optionally AttrDict).\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nfile_path\n\n\na path to a yaml file\n\n\nattrdict\nbool\nTrue\nif True, convert output to AttrDict\n\n\n\n\nsource\n\n\ndict2yaml\n\n dict2yaml (d, file_path, sort_keys=False)\n\nConverts a dict to a yaml file.\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nd\n\n\na dict\n\n\nfile_path\n\n\na path to a yaml file\n\n\nsort_keys\nbool\nFalse\nif True, sort the keys\n\n\n\n\nprogram: wandb_scripts/train_script.py # (required) Path to training script.\nmethod: bayes # (required) Specify the search strategy: grid, random or bayes\nparameters: # (required) Specify parameters bounds to search.\n bs:\n values: [32, 64, 
128]\n depth:\n values: [3, 6, 9, 12]\n fc_dropout:\n distribution: uniform\n min: 0.\n max: 0.5\n lr_max:\n values: [0.001, 0.003, 0.01, 0.03, 0.1]\n n_epoch:\n values: [10, 15, 20]\n nb_filters:\n values: [32, 64, 128]\nname: LSST_sweep_01\nmetric:\n name: accuracy # This must match one of the metrics in the training script\n goal: maximize\nearly_terminate:\n type: hyperband\n min_iter: 3\nproject: LSST_wandb_hpo\n\nWriting sweep_config.yaml\n\n\n\nfname = \"sweep_config.yaml\"\nsweep_config = yaml2dict(fname)\nprint(sweep_config)\ntest_eq(sweep_config.method, 'bayes')\ntest_eq(sweep_config['metric'], {'name': 'accuracy', 'goal': 'maximize'})\nos.remove(fname)\n\n{'program': 'wandb_scripts/train_script.py', 'method': 'bayes', 'parameters': {'bs': {'values': [32, 64, 128]}, 'depth': {'values': [3, 6, 9, 12]}, 'fc_dropout': {'distribution': 'uniform', 'min': 0.0, 'max': 0.5}, 'lr_max': {'values': [0.001, 0.003, 0.01, 0.03, 0.1]}, 'n_epoch': {'values': [10, 15, 20]}, 'nb_filters': {'values': [32, 64, 128]}}, 'name': 'LSST_sweep_01', 'metric': {'name': 'accuracy', 'goal': 'maximize'}, 'early_terminate': {'type': 'hyperband', 'min_iter': 3}, 'project': 'LSST_wandb_hpo'}\n\n\n\nsource\n\n\nget_cat_cols\n\n get_cat_cols (df)\n\n\nsource\n\n\nget_cont_cols\n\n get_cont_cols (df)\n\n\nsource\n\n\nstr2index\n\n str2index (o)\n\n\nsource\n\n\nstr2list\n\n str2list (o)\n\n\nsource\n\n\nmap_array\n\n map_array (arr, dim=1)\n\n\nsource\n\n\nget_mapping\n\n get_mapping (arr, dim=1, return_counts=False)\n\n\na = np.asarray(alphabet[np.random.randint(0,15,30)]).reshape(10,3)\nb = np.asarray(ALPHABET[np.random.randint(6,10,30)]).reshape(10,3)\nx = concat(a,b,dim=1)\nmaps, counts = get_mapping(x, dim=1, return_counts=True)\nx, maps, counts\n\n(array([['d', 'k', 'l', 'I', 'I', 'G'],\n ['g', 'i', 'l', 'I', 'J', 'I'],\n ['e', 'l', 'n', 'G', 'H', 'I'],\n ['e', 'l', 'a', 'I', 'H', 'G'],\n ['k', 'l', 'b', 'I', 'I', 'J'],\n ['c', 'f', 'k', 'I', 'H', 'I'],\n ['e', 'j', 'f', 'I', 'H', 'J'],\n ['n', 'd', 'g', 'G', 'J', 'J'],\n ['d', 'f', 'a', 'I', 'H', 'H'],\n ['i', 'c', 'm', 'J', 'G', 'G']], dtype='<U1'),\n [(#7) ['c','d','e','g','i','k','n'],\n (#7) ['c','d','f','i','j','k','l'],\n (#8) ['a','b','f','g','k','l','m','n'],\n (#3) ['G','I','J'],\n (#4) ['G','H','I','J'],\n (#4) ['G','H','I','J']],\n [7, 7, 8, 3, 4, 4])\n\n\n\nx = np.asarray(alphabet[np.random.randint(0,15,30)]).reshape(10,3)\nx, map_array(x), map_array(x, 1)\n\n(array([['i', 'm', 'd'],\n ['h', 'm', 'g'],\n ['i', 'g', 'd'],\n ['k', 'm', 'n'],\n ['n', 'j', 'l'],\n ['n', 'l', 'i'],\n ['f', 'c', 'k'],\n ['i', 'm', 'a'],\n ['l', 'i', 'f'],\n ['k', 'o', 'g']], dtype='<U1'),\n array([[2, 5, 1],\n [1, 5, 3],\n [2, 1, 1],\n [3, 5, 7],\n [5, 3, 6],\n [5, 4, 4],\n [0, 0, 5],\n [2, 5, 0],\n [4, 2, 2],\n [3, 6, 3]]),\n array([[2, 5, 1],\n [1, 5, 3],\n [2, 1, 1],\n [3, 5, 7],\n [5, 3, 6],\n [5, 4, 4],\n [0, 0, 5],\n [2, 5, 0],\n [4, 2, 2],\n [3, 6, 3]]))\n\n\n\nsource\n\n\nlog_tfm\n\n log_tfm (o, inplace=False)\n\nLog transforms an array-like object with positive and/or negative values\n\narr = np.asarray([-1000, -100, -10, -1, 0, 1, 10, 100, 1000]).astype(float)\nplt.plot(arr, log_tfm(arr, False))\nplt.show()\n\n\n\n\n\n\n\n\n\nt = tensor([-1000, -100, -10, -1, 0, 1, 10, 100, 1000]).float()\nplt.plot(t, log_tfm(t, False))\nplt.show()\n\n\n\n\n\n\n\n\n\nsource\n\n\nto_sincos_time\n\n to_sincos_time (arr, max_value)\n\n\narr = np.sort(np.random.rand(100) * 5)\narr_sin, arr_cos = to_sincos_time(arr, 5)\nplt.scatter(arr, arr_sin)\nplt.scatter(arr, 
arr_cos)\nplt.show()\n\n\n\n\n\n\n\n\n\nsource\n\n\nplot_feature_dist\n\n plot_feature_dist (X, percentiles=[0, 0.1, 0.5, 1, 5, 10, 25, 50, 75, 90,\n 95, 99, 99.5, 99.9, 100])\n\n\narr = np.random.rand(10, 3, 100)\nplot_feature_dist(arr, percentiles=[0,0.1,0.5,1,5,10,25,50,75,90,95,99,99.5,99.9,100])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nrolling_moving_average\n\n rolling_moving_average (o, window=2)\n\n\na = np.arange(60).reshape(2,3,10).astype(float)\nt = torch.arange(60).reshape(2,3,10).float()\ntest_close(rolling_moving_average(a, window=3), rolling_moving_average(t, window=3).numpy())\nprint(t)\nprint(rolling_moving_average(t, window=3))\n\ntensor([[[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24., 25., 26., 27., 28., 29.]],\n\n [[30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],\n [40., 41., 42., 43., 44., 45., 46., 47., 48., 49.],\n [50., 51., 52., 53., 54., 55., 56., 57., 58., 59.]]])\ntensor([[[ 0.0000, 0.5000, 1.0000, 2.0000, 3.0000, 4.0000, 5.0000,\n 6.0000, 7.0000, 8.0000],\n [10.0000, 10.5000, 11.0000, 12.0000, 13.0000, 14.0000, 15.0000,\n 16.0000, 17.0000, 18.0000],\n [20.0000, 20.5000, 21.0000, 22.0000, 23.0000, 24.0000, 25.0000,\n 26.0000, 27.0000, 28.0000]],\n\n [[30.0000, 30.5000, 31.0000, 32.0000, 33.0000, 34.0000, 35.0000,\n 36.0000, 37.0000, 38.0000],\n [40.0000, 40.5000, 41.0000, 42.0000, 43.0000, 44.0000, 45.0000,\n 46.0000, 47.0000, 48.0000],\n [50.0000, 50.5000, 51.0000, 52.0000, 53.0000, 54.0000, 55.0000,\n 56.0000, 57.0000, 58.0000]]])\n\n\n\nsource\n\n\nfbfill_sequence\n\n fbfill_sequence (o)\n\nForward and backward fills an array-like object alongside sequence dimension\n\nsource\n\n\nbfill_sequence\n\n bfill_sequence (o)\n\nBackward fills an array-like object alongside sequence dimension\n\nsource\n\n\nffill_sequence\n\n ffill_sequence (o)\n\nForward fills an array-like object alongside sequence dimension\n\na = np.arange(80).reshape(2, 4, 10).astype(float)\nmask = np.random.rand(*a.shape)\na[mask > .8] = np.nan\nt = torch.from_numpy(a)\nt\n\ntensor([[[ 0., 1., 2., 3., 4., 5., 6., 7., 8., nan],\n [10., 11., nan, nan, 14., 15., nan, 17., nan, 19.],\n [20., 21., 22., 23., nan, 25., 26., 27., 28., 29.],\n [30., 31., 32., 33., nan, 35., 36., 37., 38., 39.]],\n\n [[40., 41., 42., 43., 44., 45., 46., 47., nan, 49.],\n [nan, 51., nan, 53., 54., 55., nan, 57., 58., 59.],\n [60., 61., 62., 63., 64., nan, nan, 67., 68., 69.],\n [70., nan, 72., 73., 74., 75., 76., nan, 78., 79.]]],\n dtype=torch.float64)\n\n\n\n# forward fill\nfilled_a = ffill_sequence(a)\nprint(filled_a)\nm = np.isnan(filled_a)\ntest_eq(filled_a[~m], ffill_sequence(t).numpy()[~m])\n\n[[[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 8.]\n [10. 11. 11. 11. 14. 15. 15. 17. 17. 19.]\n [20. 21. 22. 23. 23. 25. 26. 27. 28. 29.]\n [30. 31. 32. 33. 33. 35. 36. 37. 38. 39.]]\n\n [[40. 41. 42. 43. 44. 45. 46. 47. 47. 49.]\n [nan 51. 51. 53. 54. 55. 55. 57. 58. 59.]\n [60. 61. 62. 63. 64. 64. 64. 67. 68. 69.]\n [70. 70. 72. 73. 74. 75. 76. 76. 78. 79.]]]\n\n\n\n# backward fill\nfilled_a = bfill_sequence(a)\nprint(filled_a)\nm = np.isnan(filled_a)\ntest_eq(filled_a[~m], bfill_sequence(t).numpy()[~m])\n\n[[[ 0. 1. 2. 3. 4. 5. 6. 7. 8. nan]\n [10. 11. 14. 14. 14. 15. 17. 17. 19. 19.]\n [20. 21. 22. 23. 25. 25. 26. 27. 28. 29.]\n [30. 31. 32. 33. 35. 35. 36. 37. 38. 39.]]\n\n [[40. 41. 42. 43. 44. 45. 46. 47. 49. 49.]\n [51. 51. 53. 53. 54. 55. 57. 57. 58. 59.]\n [60. 61. 62. 63. 64. 67. 67. 67. 68. 69.]\n [70. 72. 72. 73. 74. 75. 76. 78. 
78. 79.]]]\n\n\n\n# forward & backward fill\nfilled_a = fbfill_sequence(a)\nprint(filled_a)\nm = np.isnan(filled_a)\ntest_eq(filled_a[~m], fbfill_sequence(t).numpy()[~m])\n\n[[[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 8.]\n [10. 11. 11. 11. 14. 15. 15. 17. 17. 19.]\n [20. 21. 22. 23. 23. 25. 26. 27. 28. 29.]\n [30. 31. 32. 33. 33. 35. 36. 37. 38. 39.]]\n\n [[40. 41. 42. 43. 44. 45. 46. 47. 47. 49.]\n [51. 51. 51. 53. 54. 55. 55. 57. 58. 59.]\n [60. 61. 62. 63. 64. 64. 64. 67. 68. 69.]\n [70. 70. 72. 73. 74. 75. 76. 76. 78. 79.]]]\n\n\n\nsource\n\n\ndummify\n\n dummify (o:Union[numpy.ndarray,torch.Tensor], by_var:bool=True,\n inplace:bool=False, skip:Optional[list]=None, random_state=None)\n\nShuffles an array-like object along all dimensions or dimension 1 (variables) if by_var is True.\n\narr = np.random.rand(2,3,10)\narr_original = arr.copy()\ndummy_arr = dummify(arr)\ntest_ne(arr_original, dummy_arr)\ntest_eq(arr_original, arr)\ndummify(arr, inplace=True)\ntest_ne(arr_original, arr)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nt = torch.rand(2,3,10)\nt_original = t.clone()\ndummy_t = dummify(t)\ntest_ne(t_original, dummy_t)\ntest_eq(t_original, t)\ndummify(t, inplace=True)\ntest_ne(t_original, t)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nshuffle_along_axis\n\n shuffle_along_axis (o, axis=-1, random_state=None)\n\n\nX = np.arange(60).reshape(2,3,10) + 10\nX_shuffled = shuffle_along_axis(X,(0, -1), random_state=23)\ntest_eq(X_shuffled, np.array([[[13, 15, 41, 14, 40, 49, 18, 42, 47, 46],\n [28, 56, 53, 50, 52, 25, 24, 57, 51, 59],\n [34, 30, 38, 35, 69, 66, 63, 67, 61, 62]],\n\n [[19, 10, 11, 16, 43, 12, 17, 48, 45, 44],\n [23, 20, 26, 22, 21, 27, 58, 29, 54, 55],\n [36, 31, 39, 60, 33, 68, 37, 32, 65, 64]]]))\n\n\nsource\n\n\nanalyze_array\n\n analyze_array (o, bins=100, density=False, feature_names=None,\n clip_outliers_plot=False, quantile_range=(25.0, 75.0),\n percentiles=[1, 25, 50, 75, 99], text_len=12, figsize=(10,\n 6))\n\n\nsource\n\n\nanalyze_feature\n\n analyze_feature (feature, bins=100, density=False, feature_name=None,\n clip_outliers_plot=False, quantile_range=(25.0, 75.0),\n percentiles=[1, 25, 50, 75, 99], text_len=12,\n figsize=(10, 6))\n\n\nx = np.random.normal(size=(1000))\nanalyze_array(x)\n\n array shape: (1000,)\n dtype: float64\n nan values: 0.0%\n max: 3.581094060980321\n 1: -2.1615590829115185\n 25: -0.5910961139851849\n 50: -0.002247946765973052\n 75: 0.6259274030927355\n 99: 2.3412961380708084\n min: -2.9413736207935037\n outlier min: -2.416631389602066\n outlier max: 2.4514626787096163\n outliers: 1.3%\n mean: 0.0252125277963861\n std: 0.946955486669799\n normal dist: True\n\n\n\n\n\n\n\n\n\n\nx1 = np.random.normal(size=(1000,2))\nx2 = np.random.normal(3, 5, size=(1000,2))\nx = x1 + x2\nanalyze_array(x)\n\n array shape: (1000, 2)\n\n 0 feature: 0\n\n dtype: float64\n nan values: 0.0%\n max: 20.323075761234193\n 1: -8.260661592413742\n 25: -0.6268118569038604\n 50: 2.7491159998190335\n 75: 6.1659732833324234\n 99: 15.387037197243288\n min: -13.122296090020368\n outlier min: -10.815989567258287\n outlier max: 16.35515099368685\n outliers: 0.9%\n mean: 2.9347218553275445\n std: 5.134940196769919\n normal dist: True\n\n 1 feature: 1\n\n dtype: float64\n nan values: 0.0%\n max: 19.86661808715871\n 1: -8.727124941895372\n 25: -0.45908489661153007\n 50: 2.875134866985423\n 75: 6.288434737224429\n 99: 14.424046274543118\n min: -10.963913297285615\n outlier min: -10.58036434736547\n outlier max: 16.409714187978366\n outliers: 0.6%\n mean: 2.9552584127690014\n std: 4.99683092772426\n normal 
dist: True\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nget_relpath\n\n get_relpath (path)\n\n\nsource\n\n\nto_root_path\n\n to_root_path (path)\n\nConverts a path to an absolute path from the root directory of the repository.\n\nsource\n\n\nget_root\n\n get_root ()\n\nReturns the root directory of the git repository.\n\nsource\n\n\nsplit_in_chunks\n\n split_in_chunks (o, chunksize, start=0, shuffle=False, drop_last=False)\n\n\na = np.arange(5, 15)\ntest_eq(split_in_chunks(a, 3, drop_last=False), [array([5, 6, 7]), array([ 8, 9, 10]), array([11, 12, 13]), array([14])])\ntest_eq(split_in_chunks(a, 3, drop_last=True), [array([5, 6, 7]), array([ 8, 9, 10]), array([11, 12, 13])])\ntest_eq(split_in_chunks(a, 3, start=2, drop_last=True), [array([7, 8, 9]), array([10, 11, 12])])\n\n\nsource\n\n\nload_object\n\n load_object (file_path)\n\n\nsource\n\n\nsave_object\n\n save_object (o, file_path, verbose=True)\n\n\nsplit = np.arange(100)\nsave_object(split, file_path='data/test')\nsplit2 = load_object('data/test.pkl')\ntest_eq(split, split2)\n\ndata directory already exists.\nndarray saved as data/test.pkl\n\n\n\nsplits = L([[[0,1,2,3,4], [5,6,7,8,9]],[[10,11,12,13,14], [15,16,17,18,19]]])\nsave_object(splits, file_path=Path('data/test'))\nsplits2 = load_object('data/test')\ntest_eq(splits, splits2)\n\ndata directory already exists.\nL saved as data/test.pkl\n\n\n\nsource\n\n\nget_idxs_to_keep\n\n get_idxs_to_keep (o, cond, crit='all', invert=False, axis=(1, 2),\n keepdims=False)\n\n\na = np.random.rand(100, 2, 10)\na[a > .95] = np.nan\nidxs_to_keep = get_idxs_to_keep(a, np.isfinite)\nif idxs_to_keep.size>0:\n test_eq(np.isnan(a[idxs_to_keep]).sum(), 0)\n\n\nsource\n\n\nzerofy\n\n zerofy (a, stride, keep=False)\n\nCreate copies of an array setting individual/ group values to zero\n\nstride = 3\na = np.arange(2*5).reshape(2,5) + 1\n\nzerofy(a, stride, keep=False)\n\narray([[[ 0., 0., 3., 4., 5.],\n [ 6., 7., 8., 9., 10.]],\n\n [[ 1., 2., 0., 0., 0.],\n [ 6., 7., 8., 9., 10.]],\n\n [[ 1., 2., 3., 4., 5.],\n [ 0., 0., 8., 9., 10.]],\n\n [[ 1., 2., 3., 4., 5.],\n [ 6., 7., 0., 0., 0.]]])\n\n\n\nsource\n\n\nfeat2list\n\n feat2list (o)\n\n\na = 'a'\ntest_eq(feat2list(a), ['a'])\na = ['a', 'b']\ntest_eq(feat2list(a), ['a', 'b'])\na = None\ntest_eq(feat2list(a), [])\n\n\nsource\n\n\nsmallest_dtype\n\n smallest_dtype (num, use_unsigned=False)\n\nFind the smallest dtype that can safely hold num\n\ntest_eq(smallest_dtype(3654), 'int16')\ntest_eq(smallest_dtype(2048.), 'float16')\ntest_eq(smallest_dtype(365454), 'int32')\ntest_eq(smallest_dtype(365454.), 'float32')\ntest_eq(smallest_dtype(3654545134897), 'int64')\n\n\nsource\n\n\nplot_forecast\n\n plot_forecast (X_true, y_true, y_pred, sel_vars=None, idx=None,\n figsize=(8, 4), n_samples=1)\n\n\nsource\n\n\nstr2callable\n\n str2callable (object_path:str=None)\n\nTransform a string into a callable object without importing it in the script.\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nobject_path\nstr\nNone\nThe string representing the object path.\n\n\n\n\n# test showing you don't need to import the object in the script. 
The library needs to be installed though.\ntry:\n pyts\nexcept Exception as e:\n print(0, e)\ntry:\n pyts.image\nexcept Exception as e:\n print(1, e)\ntry:\n gasf = eval(\"pyts.image.GramianAngularField(method='summation')\")\n print(f\"2 success: {gasf}\")\nexcept Exception as e:\n print(2, e)\ntry:\n gasf = str2callable(\"pyts.image.GramianAngularField(method='summation')\")\n print(f\"3 success: {gasf}\")\nexcept Exception as e:\n print(3, e)\n\n0 name 'pyts' is not defined\n1 name 'pyts' is not defined\n2 name 'pyts' is not defined\n3 success: GramianAngularField()", + "crumbs": [ + "Utilities" + ] + }, + { + "objectID": "models.inceptiontimeplus.html", + "href": "models.inceptiontimeplus.html", + "title": "InceptionTimePlus", + "section": "", + "text": "This is an unofficial PyTorch implementation of InceptionTime (Fawaz, 2019) created by Ignacio Oguiza.\n\nReferences: * Fawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J., … & Petitjean, F. (2020). Inceptiontime: Finding alexnet for time series classification. Data Mining and Knowledge Discovery, 34(6), 1936-1962. * Official InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime\n\nsource\n\nInceptionBlockPlus\n\n InceptionBlockPlus (ni, nf, residual=True, depth=6, coord=False,\n norm='Batch', zero_norm=False, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={},\n sa=False, se=None, stoch_depth=1.0, ks=40,\n bottleneck=True, padding='same', separable=False,\n dilation=1, stride=1, conv_dropout=0.0, bn_1st=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nInceptionModulePlus\n\n InceptionModulePlus (ni, nf, ks=40, bottleneck=True, padding='same',\n coord=False, separable=False, dilation=1, stride=1,\n conv_dropout=0.0, sa=False, se=None, norm='Batch',\n zero_norm=False, bn_1st=True, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={})\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nInceptionTimePlus\n\n InceptionTimePlus (c_in, c_out, seq_len=None, nf=32, nb_filters=None,\n flatten=False, concat_pool=False, fc_dropout=0.0,\n bn=False, y_range=None, custom_head=None, ks=40,\n bottleneck=True, padding='same', coord=False,\n separable=False, dilation=1, stride=1,\n conv_dropout=0.0, sa=False, se=None, norm='Batch',\n zero_norm=False, bn_1st=True, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={})\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nXCoordTime\n\n XCoordTime (c_in, c_out, seq_len=None, nf=32, nb_filters=None,\n flatten=False, concat_pool=False, fc_dropout=0.0, bn=False,\n y_range=None, custom_head=None, ks=40, bottleneck=True,\n padding='same', coord=False, separable=False, dilation=1,\n stride=1, conv_dropout=0.0, sa=False, se=None, norm='Batch',\n zero_norm=False, bn_1st=True, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={})\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. 
When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nInCoordTime\n\n InCoordTime (c_in, c_out, seq_len=None, nf=32, nb_filters=None,\n flatten=False, concat_pool=False, fc_dropout=0.0, bn=False,\n y_range=None, custom_head=None, ks=40, bottleneck=True,\n padding='same', coord=False, separable=False, dilation=1,\n stride=1, conv_dropout=0.0, sa=False, se=None, norm='Batch',\n zero_norm=False, bn_1st=True, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={})\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nfrom tsai.data.core import TSCategorize\nfrom tsai.models.utils import count_parameters\n\n\nbs = 16\nn_vars = 3\nseq_len = 51\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\n\ntest_eq(InceptionTimePlus(n_vars,c_out)(xb).shape, [bs, c_out])\ntest_eq(InceptionTimePlus(n_vars,c_out,concat_pool=True)(xb).shape, [bs, c_out])\ntest_eq(InceptionTimePlus(n_vars,c_out, bottleneck=False)(xb).shape, [bs, c_out])\ntest_eq(InceptionTimePlus(n_vars,c_out, residual=False)(xb).shape, [bs, c_out])\ntest_eq(InceptionTimePlus(n_vars,c_out, conv_dropout=.5)(xb).shape, [bs, c_out])\ntest_eq(InceptionTimePlus(n_vars,c_out, stoch_depth=.5)(xb).shape, [bs, c_out])\ntest_eq(InceptionTimePlus(n_vars, c_out, seq_len=seq_len, zero_norm=True, flatten=True)(xb).shape, [bs, c_out])\ntest_eq(InceptionTimePlus(n_vars,c_out, coord=True, separable=True, \n norm='Instance', zero_norm=True, bn_1st=False, fc_dropout=.5, sa=True, se=True, act=nn.PReLU, act_kwargs={})(xb).shape, [bs, c_out])\ntest_eq(InceptionTimePlus(n_vars,c_out, coord=True, separable=True,\n norm='Instance', zero_norm=True, bn_1st=False, act=nn.PReLU, act_kwargs={})(xb).shape, [bs, c_out])\ntest_eq(count_parameters(InceptionTimePlus(3, 2)), 455490)\ntest_eq(count_parameters(InceptionTimePlus(6, 2, **{'coord': True, 'separable': True, 'zero_norm': True})), 77204)\ntest_eq(count_parameters(InceptionTimePlus(3, 2, ks=40)), count_parameters(InceptionTimePlus(3, 2, ks=[9, 19, 39])))\n\n\nbs = 16\nn_vars = 3\nseq_len = 51\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\n\nmodel = InceptionTimePlus(n_vars, c_out)\nmodel(xb).shape\ntest_eq(model[0](xb), model.backbone(xb))\ntest_eq(model[1](model[0](xb)), model.head(model[0](xb)))\ntest_eq(model[1].state_dict().keys(), model.head.state_dict().keys())\ntest_eq(len(ts_splitter(model)), 2)\n\n\ntest_eq(check_bias(InceptionTimePlus(2,3, zero_norm=True), is_conv)[0].sum(), 0)\ntest_eq(check_weight(InceptionTimePlus(2,3, zero_norm=True), is_bn)[0].sum(), 6)\ntest_eq(check_weight(InceptionTimePlus(2,3), is_bn)[0], np.array([1., 1., 1., 1., 1., 1., 1., 1.]))\n\n\nfor i in range(10): InceptionTimePlus(n_vars,c_out,stoch_depth=0.8,depth=9,zero_norm=True)(xb)\n\n\nnet = InceptionTimePlus(2,3,**{'coord': True, 'separable': True, 'zero_norm': True})\ntest_eq(check_weight(net, is_bn)[0], np.array([1., 1., 0., 1., 1., 0., 1., 1.]))\nnet\n\nInceptionTimePlus(\n (backbone): Sequential(\n (0): InceptionBlockPlus(\n (inception): ModuleList(\n (0): InceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), 
stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (1): InceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (2): InceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (3): InceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)\n 
(pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (4): InceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (5): InceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n (norm): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (shortcut): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(3, 128, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n )\n (act): ModuleList(\n (0): ReLU()\n (1): ReLU()\n )\n (add): Add\n )\n )\n (head): Sequential(\n (0): Sequential(\n (0): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n (1): LinBnDrop(\n (0): Linear(in_features=128, out_features=3, bias=True)\n )\n )\n )\n)\n\n\n\nsource\n\n\nMultiInceptionTimePlus\n\n MultiInceptionTimePlus (feat_list, c_out, seq_len=None, nf=32,\n nb_filters=None, depth=6, stoch_depth=1.0,\n flatten=False, concat_pool=False, fc_dropout=0.0,\n bn=False, y_range=None, custom_head=None)\n\nClass that allows you to create a model with multiple branches of InceptionTimePlus.\n\nbs = 16\nn_vars = 3\nseq_len = 51\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\n\ntest_eq(count_parameters(MultiInceptionTimePlus([1,1,1], c_out)) > count_parameters(MultiInceptionTimePlus(3, c_out)), True)\ntest_eq(MultiInceptionTimePlus([1,1,1], c_out).to(xb.device)(xb).shape, MultiInceptionTimePlus(3, c_out).to(xb.device)(xb).shape)\n\n[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.\n\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nc_out = 10\nxb = torch.rand(bs, n_vars, seq_len)\nnew_head = partial(conv_lin_nd_head, d=(5,2))\nnet = MultiInceptionTimePlus(n_vars, c_out, seq_len, custom_head=new_head)\nprint(net.to(xb.device)(xb).shape)\nnet.head\n\ntorch.Size([16, 5, 2, 10])\n\n\nSequential(\n (0): create_conv_lin_nd_head(\n (0): Conv1d(128, 10, kernel_size=(1,), stride=(1,))\n (1): Linear(in_features=12, out_features=10, bias=True)\n (2): Transpose(-1, -2)\n (3): Reshape(bs, 5, 2, 10)\n )\n)\n\n\n\nbs = 16\nn_vars = 6\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\nnet = MultiInceptionTimePlus([1,2,3], c_out, seq_len)\nprint(net.to(xb.device)(xb).shape)\nnet.head\n\ntorch.Size([16, 2])\n\n\nSequential(\n (0): Sequential(\n (0): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n (1): LinBnDrop(\n (0): Linear(in_features=384, out_features=2, bias=True)\n )\n )\n)\n\n\n\nbs = 8\nc_in = 7 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 10\nxb2 = torch.randn(bs, c_in, seq_len)\nmodel1 = MultiInceptionTimePlus([2, 5], c_out, seq_len)\nmodel2 = MultiInceptionTimePlus([[0,2,5], [0,1,3,4,6]], c_out, seq_len)\ntest_eq(model1.to(xb2.device)(xb2).shape, (bs, c_out))\ntest_eq(model1.to(xb2.device)(xb2).shape, model2.to(xb2.device)(xb2).shape)\n\n\nfrom tsai.data.external import *\nfrom tsai.data.core import *\nfrom tsai.data.preprocessing import *\n\n\nX, y, splits = get_UCR_data('NATOPS', split_data=False)\ntfms = [None, [TSCategorize()]]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\nmodel = InceptionTimePlus(dls.vars, dls.c, dls.len)\nxb,yb=first(dls.train)\ntest_eq(model.to(xb.device)(xb).shape, (dls.bs, dls.c))\ntest_eq(count_parameters(model), 460038)\n\n\nX, y, splits = get_UCR_data('NATOPS', split_data=False)\ntfms = [None, [TSCategorize()]]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\nmodel = MultiInceptionTimePlus([4, 15, 5], dls.c, dls.len)\nxb,yb=first(dls.train)\ntest_eq(model.to(xb.device)(xb).shape, (dls.bs, dls.c))\ntest_eq(count_parameters(model), 1370886)", + "crumbs": [ + "Models", + "CNNs", + "InceptionTimePlus" + ] + }, + { + "objectID": "data.mixed_augmentation.html", + "href": "data.mixed_augmentation.html", + "title": "Label-mixing transforms", + "section": "", + "text": "Callbacks that perform data augmentation by mixing 
samples in different ways.\n\n\nsource\n\nMixHandler1d\n\n MixHandler1d (alpha=0.5)\n\nA handler class for implementing mixed sample data augmentation\n\nsource\n\n\nMixUp1d\n\n MixUp1d (alpha=0.4)\n\nImplementation of https://arxiv.org/abs/1710.09412\n\nfrom fastai.learner import *\nfrom tsai.models.InceptionTime import *\nfrom tsai.data.external import get_UCR_data\nfrom tsai.data.core import get_ts_dls, TSCategorize\nfrom tsai.data.preprocessing import TSStandardize\nfrom tsai.learner import ts_learner\n\n\nX, y, splits = get_UCR_data('NATOPS', return_split=False)\ntfms = [None, TSCategorize()]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms)\nlearn = ts_learner(dls, InceptionTime, cbs=MixUp1d(0.4))\nlearn.fit_one_cycle(1)\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\ntime\n\n\n\n\n0\n1.908455\n1.811908\n00:03\n\n\n\n\n\n\nsource\n\n\nCutMix1d\n\n CutMix1d (alpha=1.0)\n\nImplementation of https://arxiv.org/abs/1905.04899\n\nsource\n\n\nIntraClassCutMix1d\n\n IntraClassCutMix1d (alpha=1.0)\n\nImplementation of CutMix applied to examples of the same class\n\nX, y, splits = get_UCR_data('NATOPS', split_data=False)\ntfms = [None, TSCategorize()]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms)\nlearn = ts_learner(dls, InceptionTime, cbs=IntraClassCutMix1d())\nlearn.fit_one_cycle(1)\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\ntime\n\n\n\n\n0\n1.813483\n1.792010\n00:03\n\n\n\n\n\n\nX, y, splits = get_UCR_data('NATOPS', split_data=False)\ntfms = [None, TSCategorize()]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms)\nlearn = ts_learner(dls, cbs=CutMix1d(1.))\nlearn.fit_one_cycle(1)\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\ntime\n\n\n\n\n0\n1.824509\n1.774964\n00:04", + "crumbs": [ + "Data", + "Label-mixing transforms" + ] + }, + { + "objectID": "data.tabular.html", + "href": "data.tabular.html", + "title": "Time Series Tabular Data", + "section": "", + "text": "Main Tabular functions used throughout the library. 
This is helpful when you have additional time series data like metadata, time series features, etc.\n\n\nsource\n\nget_tabular_ds\n\n get_tabular_ds (df, procs=[<class 'fastai.tabular.core.Categorify'>,\n <class 'fastai.tabular.core.FillMissing'>, <class\n 'fastai.data.transforms.Normalize'>], cat_names=None,\n cont_names=None, y_names=None, groupby=None,\n y_block=None, splits=None, do_setup=True, inplace=False,\n reduce_memory=True, device=None)\n\n\nsource\n\n\nget_tabular_dls\n\n get_tabular_dls (df, procs=[<class 'fastai.tabular.core.Categorify'>,\n <class 'fastai.tabular.core.FillMissing'>, <class\n 'fastai.data.transforms.Normalize'>], cat_names=None,\n cont_names=None, y_names=None, bs=64, y_block=None,\n splits=None, do_setup=True, inplace=False,\n reduce_memory=True, device=None,\n path:Union[str,pathlib.Path]='.')\n\n\nsource\n\n\npreprocess_df\n\n preprocess_df (df, procs=[<class 'fastai.tabular.core.Categorify'>,\n <class 'fastai.tabular.core.FillMissing'>, <class\n 'fastai.data.transforms.Normalize'>], cat_names=None,\n cont_names=None, y_names=None, sample_col=None,\n reduce_memory=True)\n\n\npath = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')\n# df['salary'] = np.random.rand(len(df)) # uncomment to simulate a cont dependent variable\n\ncat_names = ['workclass', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex',\n 'capital-gain', 'capital-loss', 'native-country']\ncont_names = ['age', 'fnlwgt', 'hours-per-week']\ntarget = ['salary']\nsplits = RandomSplitter()(range_of(df))\n\ndls = get_tabular_dls(df, cat_names=cat_names, cont_names=cont_names, y_names='salary', splits=splits, bs=512, device=device)\ndls.show_batch()\n\n\n\n\n\nworkclass\neducation\neducation-num\nmarital-status\noccupation\nrelationship\nrace\nsex\ncapital-gain\ncapital-loss\nnative-country\nage\nfnlwgt\nhours-per-week\nsalary\n\n\n\n\n0\nPrivate\nSome-college\n10.0\nDivorced\nExec-managerial\nNot-in-family\nWhite\nMale\n0\n0\nUnited-States\n48.000000\n190072.000005\n50.000000\n>=50k\n\n\n1\nSelf-emp-not-inc\nSome-college\n10.0\nMarried-civ-spouse\nSales\nHusband\nWhite\nMale\n0\n0\nUnited-States\n72.000001\n284120.002964\n40.000000\n<50k\n\n\n2\nPrivate\nSome-college\n10.0\nMarried-civ-spouse\nProtective-serv\nHusband\nBlack\nMale\n0\n0\nUnited-States\n72.000001\n53684.002497\n40.000000\n<50k\n\n\n3\nSelf-emp-inc\nSome-college\n10.0\nMarried-civ-spouse\nFarming-fishing\nHusband\nWhite\nMale\n0\n0\nUnited-States\n47.000000\n337049.998875\n40.000000\n<50k\n\n\n4\nPrivate\nHS-grad\n9.0\nDivorced\nCraft-repair\nNot-in-family\nWhite\nMale\n0\n0\nUnited-States\n46.000000\n207677.000707\n30.000000\n<50k\n\n\n5\nPrivate\n5th-6th\n3.0\nDivorced\nPriv-house-serv\nUnmarried\nWhite\nFemale\n0\n0\nMexico\n45.000000\n265082.999142\n35.000000\n<50k\n\n\n6\nPrivate\nAssoc-acdm\n12.0\nNever-married\nOther-service\nNot-in-family\nWhite\nFemale\n0\n0\nUnited-States\n28.000000\n150296.001328\n79.999999\n<50k\n\n\n7\nPrivate\nHS-grad\n9.0\nMarried-civ-spouse\nExec-managerial\nHusband\nWhite\nMale\n0\n0\nUnited-States\n50.000000\n94080.999353\n40.000000\n>=50k\n\n\n8\nPrivate\nAssoc-voc\n11.0\nMarried-civ-spouse\nExec-managerial\nHusband\nWhite\nMale\n0\n0\nGermany\n58.000000\n235624.000302\n40.000000\n>=50k\n\n\n9\nPrivate\nHS-grad\n9.0\nNever-married\nOther-service\nUnmarried\nBlack\nFemale\n0\n0\nJapan\n29.000000\n419721.008996\n40.000000\n<50k\n\n\n\n\n\n\nmetrics = mae if dls.c == 1 else accuracy\nlearn = tabular_learner(dls, layers=[200, 100], y_range=None, 
metrics=metrics)\nlearn.fit(1, 1e-2)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n0.349525\n0.288922\n0.866093\n00:05\n\n\n\n\n\n\nlearn.dls.one_batch()\n\n(tensor([[ 5, 12, 9, ..., 1, 1, 21],\n [ 1, 10, 13, ..., 1, 1, 3],\n [ 5, 4, 2, ..., 1, 1, 6],\n ...,\n [ 5, 6, 4, ..., 1, 1, 40],\n [ 3, 10, 13, ..., 1, 1, 40],\n [ 5, 12, 9, ..., 116, 1, 40]]),\n tensor([[-0.2593, 0.1234, 1.1829],\n [-0.9913, -1.4041, -0.0347],\n [-0.1129, 0.4583, -0.0347],\n ...,\n [-1.5769, -0.1989, 0.3712],\n [ 0.4727, -1.4400, 0.3712],\n [ 1.5708, -0.2222, -0.0347]]),\n tensor([[1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [1],\n [1],\n [0],\n [0],\n [1],\n [1],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [1],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [0],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [1],\n [0],\n [1],\n [0],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [1],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [0],\n [0],\n [1],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [1],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [1],\n [1],\n [1],\n [0],\n [0],\n [0],\n [1],\n [1],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [1],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n 
[1],\n [0],\n [1],\n [1],\n [1],\n [0],\n [1],\n [0],\n [1],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [1],\n [1],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [1],\n [1],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [1],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [0],\n [0],\n [1],\n [0],\n [0],\n [1],\n [1]], dtype=torch.int8))\n\n\n\nlearn.model\n\nTabularModel(\n (embeds): ModuleList(\n (0): Embedding(10, 6)\n (1): Embedding(17, 8)\n (2): Embedding(17, 8)\n (3): Embedding(8, 5)\n (4): Embedding(16, 8)\n (5): Embedding(7, 5)\n (6): Embedding(6, 4)\n (7): Embedding(3, 3)\n (8): Embedding(117, 23)\n (9): Embedding(90, 20)\n (10): Embedding(43, 13)\n )\n (emb_drop): Dropout(p=0.0, inplace=False)\n (bn_cont): BatchNorm1d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (layers): Sequential(\n (0): LinBnDrop(\n (0): Linear(in_features=106, out_features=200, bias=False)\n (1): ReLU(inplace=True)\n (2): BatchNorm1d(200, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): LinBnDrop(\n (0): Linear(in_features=200, out_features=100, bias=False)\n (1): ReLU(inplace=True)\n (2): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (2): LinBnDrop(\n (0): Linear(in_features=100, out_features=2, bias=True)\n )\n )\n)\n\n\n\npath = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')\ncat_names = ['workclass', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex',\n 'capital-gain', 'capital-loss', 'native-country']\ncont_names = ['age', 'fnlwgt', 'hours-per-week']\ntarget = ['salary']\ndf, procs = preprocess_df(df, procs=[Categorify, FillMissing, Normalize], cat_names=cat_names, cont_names=cont_names, y_names=target, \n sample_col=None, reduce_memory=True)\ndf.head()\n\n\n\n\n\n\n\n\nworkclass\neducation\neducation-num\nmarital-status\noccupation\nrelationship\nrace\nsex\ncapital-gain\ncapital-loss\nnative-country\nage\nfnlwgt\nhours-per-week\nsalary\n\n\n\n\n0\n5\n8\n12\n3\n0\n6\n5\n1\n1\n48\n40\n0.763796\n-0.838084\n-0.035429\n1\n\n\n1\n5\n13\n14\n1\n5\n2\n5\n2\n101\n1\n40\n0.397233\n0.444987\n0.369519\n1\n\n\n2\n5\n12\n0\n1\n0\n5\n3\n1\n1\n1\n40\n-0.042642\n-0.886734\n-0.683348\n0\n\n\n3\n6\n15\n15\n3\n11\n1\n2\n2\n1\n1\n40\n-0.042642\n-0.728873\n-0.035429\n1\n\n\n4\n7\n6\n0\n3\n9\n6\n3\n1\n1\n1\n40\n0.250608\n-1.018314\n0.774468\n0\n\n\n\n\n\n\n\n\nprocs.classes, procs.means, procs.stds\n\n({'workclass': ['#na#', ' ?', ' Federal-gov', ' Local-gov', ' Never-worked', ' Private', ' Self-emp-inc', ' Self-emp-not-inc', ' State-gov', ' Without-pay'],\n 'education': ['#na#', ' 10th', ' 11th', ' 12th', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th', ' Assoc-acdm', ' Assoc-voc', ' Bachelors', ' Doctorate', ' HS-grad', ' Masters', ' Preschool', ' Prof-school', ' Some-college'],\n 'education-num': ['#na#', 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0],\n 'marital-status': ['#na#', ' Divorced', ' Married-AF-spouse', ' Married-civ-spouse', ' Married-spouse-absent', ' Never-married', ' Separated', ' Widowed'],\n 'occupation': ['#na#', ' ?', ' Adm-clerical', ' Armed-Forces', ' Craft-repair', ' Exec-managerial', ' Farming-fishing', ' Handlers-cleaners', ' 
Machine-op-inspct', ' Other-service', ' Priv-house-serv', ' Prof-specialty', ' Protective-serv', ' Sales', ' Tech-support', ' Transport-moving'],\n 'relationship': ['#na#', ' Husband', ' Not-in-family', ' Other-relative', ' Own-child', ' Unmarried', ' Wife'],\n 'race': ['#na#', ' Amer-Indian-Eskimo', ' Asian-Pac-Islander', ' Black', ' Other', ' White'],\n 'sex': ['#na#', ' Female', ' Male'],\n 'capital-gain': ['#na#', 0, 114, 401, 594, 914, 991, 1055, 1086, 1111, 1151, 1173, 1409, 1424, 1455, 1471, 1506, 1639, 1797, 1831, 1848, 2009, 2036, 2050, 2062, 2105, 2174, 2176, 2202, 2228, 2290, 2329, 2346, 2354, 2387, 2407, 2414, 2463, 2538, 2580, 2597, 2635, 2653, 2829, 2885, 2907, 2936, 2961, 2964, 2977, 2993, 3103, 3137, 3273, 3325, 3411, 3418, 3432, 3456, 3464, 3471, 3674, 3781, 3818, 3887, 3908, 3942, 4064, 4101, 4386, 4416, 4508, 4650, 4687, 4787, 4865, 4931, 4934, 5013, 5060, 5178, 5455, 5556, 5721, 6097, 6360, 6418, 6497, 6514, 6723, 6767, 6849, 7298, 7430, 7443, 7688, 7896, 7978, 8614, 9386, 9562, 10520, 10566, 10605, 11678, 13550, 14084, 14344, 15020, 15024, 15831, 18481, 20051, 22040, 25124, 25236, 27828, 34095, 41310, 99999],\n 'capital-loss': ['#na#', 0, 155, 213, 323, 419, 625, 653, 810, 880, 974, 1092, 1138, 1258, 1340, 1380, 1408, 1411, 1485, 1504, 1539, 1564, 1573, 1579, 1590, 1594, 1602, 1617, 1628, 1648, 1651, 1668, 1669, 1672, 1719, 1721, 1726, 1735, 1740, 1741, 1755, 1762, 1816, 1825, 1844, 1848, 1876, 1887, 1902, 1944, 1974, 1977, 1980, 2001, 2002, 2042, 2051, 2057, 2080, 2129, 2149, 2163, 2174, 2179, 2201, 2205, 2206, 2231, 2238, 2246, 2258, 2267, 2282, 2339, 2352, 2377, 2392, 2415, 2444, 2457, 2467, 2472, 2489, 2547, 2559, 2603, 2754, 2824, 3004, 3683, 3770, 3900, 4356],\n 'native-country': ['#na#', ' ?', ' Cambodia', ' Canada', ' China', ' Columbia', ' Cuba', ' Dominican-Republic', ' Ecuador', ' El-Salvador', ' England', ' France', ' Germany', ' Greece', ' Guatemala', ' Haiti', ' Holand-Netherlands', ' Honduras', ' Hong', ' Hungary', ' India', ' Iran', ' Ireland', ' Italy', ' Jamaica', ' Japan', ' Laos', ' Mexico', ' Nicaragua', ' Outlying-US(Guam-USVI-etc)', ' Peru', ' Philippines', ' Poland', ' Portugal', ' Puerto-Rico', ' Scotland', ' South', ' Taiwan', ' Thailand', ' Trinadad&Tobago', ' United-States', ' Vietnam', ' Yugoslavia']},\n {'age': 38.58164675532078,\n 'fnlwgt': 189778.36651208502,\n 'hours-per-week': 40.437455852092995},\n {'age': 13.640223192304274,\n 'fnlwgt': 105548.3568809908,\n 'hours-per-week': 12.347239175707989})", + "crumbs": [ + "Data", + "Time Series Tabular Data" + ] + }, + { + "objectID": "callback.predictiondynamics.html", + "href": "callback.predictiondynamics.html", + "title": "PredictionDynamics", + "section": "", + "text": "Callback used to visualize model predictions during training.\n\nThis is an implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on a blog post by Andrej Karpathy I read some time ago that I really liked. One of the things he mentioned was this:\n\n“visualize prediction dynamics. I like to visualize model predictions on a fixed test batch during the course of training. The “dynamics” of how these predictions move will give you incredibly good intuition for how the training progresses. Many times it is possible to feel the network “struggle” to fit your data if it wiggles too much in some way, revealing instabilities. Very low or very high learning rates are also easily noticeable in the amount of jitter.” A. 
Karpathy\n\n\nsource\n\nPredictionDynamics\n\n PredictionDynamics (show_perc=1.0, figsize=(10, 6), alpha=0.3, size=30,\n color='lime', cmap='gist_rainbow', normalize=False,\n sensitivity=None, specificity=None)\n\nBasic class handling tweaks of the training loop by changing a Learner in various events\n\nfrom tsai.basics import *\nfrom tsai.models.InceptionTime import *\n\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, split_data=False)\ncheck_data(X, y, splits, False)\n\nX - shape: [360 samples x 24 features x 51 timesteps] type: memmap dtype:float32 isnan: 0\ny - shape: (360,) type: memmap dtype:<U3 n_classes: 6 (60 samples per class) ['1.0', '2.0', '3.0', '4.0', '5.0', '6.0'] isnan: False\nsplits - n_splits: 2 shape: [180, 180] overlap: False\n\n\n\ntfms = [None, [Categorize()]]\nbatch_tfms = [TSStandardize(by_var=True)]\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\nlearn = ts_learner(dls, InceptionTime, metrics=accuracy, cbs=PredictionDynamics()) \nlearn.fit_one_cycle(2, 3e-3)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n1.885462\n1.773872\n0.238889\n00:05\n\n\n1\n1.425667\n1.640418\n0.377778\n00:05\n\n\n\n\n\n\n\n\n\n\n\n\ntrain_loss\nvalid_loss\naccuracy\n\n\n\n\n1\n1.425667\n1.640418\n0.377778", + "crumbs": [ + "Training", + "Callbacks", + "PredictionDynamics" + ] + }, + { + "objectID": "models.rocket_pytorch.html", + "href": "models.rocket_pytorch.html", + "title": "ROCKET Pytorch", + "section": "", + "text": "ROCKET (RandOm Convolutional KErnel Transform) functions for univariate and multivariate time series developed in Pytorch.\n\n\nsource\n\nROCKET\n\n ROCKET (c_in, seq_len, n_kernels=10000, kss=[7, 9, 11], device=None,\n verbose=False)\n\nRandOm Convolutional KErnel Transform\nROCKET is a GPU Pytorch implementation of the ROCKET functions generate_kernels and apply_kernels that can be used with univariate and multivariate time series.\n\nsource\n\n\ncreate_rocket_features\n\n create_rocket_features (dl, model, verbose=False)\n\nArgs: model : ROCKET model instance dl : single TSDataLoader (for example dls.train or dls.valid)\n\nbs = 16\nc_in = 7 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 15\nxb = torch.randn(bs, c_in, seq_len).to(default_device())\n\nm = ROCKET(c_in, seq_len, n_kernels=1_000, kss=[7, 9, 11]) # 1_000 for testing with a cpu. Default is 10k with a gpu!\ntest_eq(m(xb).shape, [bs, 2_000])\n\n\nfrom tsai.data.all import *\nfrom tsai.models.utils import *\n\n\nX, y, splits = get_UCR_data('OliveOil', split_data=False)\ntfms = [None, TSRegression()]\nbatch_tfms = TSStandardize(by_var=True)\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, shuffle_train=False, drop_last=False)\nmodel = build_ts_model(ROCKET, dls=dls, n_kernels=1_000) # 1_000 for testing with a cpu. Default is 10k with a gpu!\nX_train, y_train = create_rocket_features(dls.train, model) \nX_valid, y_valid = create_rocket_features(dls.valid, model)\nX_train.shape, X_valid.shape\n\n((30, 2000), (30, 2000))", + "crumbs": [ + "Models", + "ROCKETs", + "ROCKET Pytorch" + ] + }, + { + "objectID": "models.hydraplus.html", + "href": "models.hydraplus.html", + "title": "HydraPlus", + "section": "", + "text": "Hydra: competing convolutional kernels for fast and accurate time series classification.\n\nThis is a Pytorch implementation of Hydra adapted by Ignacio Oguiza and based on:\nDempster, A., Schmidt, D. F., & Webb, G. I. (2023). 
Hydra: Competing convolutional kernels for fast and accurate time series classification. Data Mining and Knowledge Discovery, 1-27.\nOriginal paper: https://link.springer.com/article/10.1007/s10618-023-00939-3\nOriginal repository: https://github.com/angus924/hydra\n\nsource\n\nHydraBackbonePlus\n\n HydraBackbonePlus (c_in, c_out, seq_len, k=8, g=64, max_c_in=8,\n clip=True, device=device(type='cpu'), zero_init=True)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nHydraPlus\n\n HydraPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None, k:int=8,\n g:int=64, max_c_in:int=8, clip:bool=True, use_bn:bool=True,\n fc_dropout:float=0.0, custom_head:Any=None,\n zero_init:bool=True, use_diff:bool=True,\n device:str=device(type='cpu'))\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nnum of channels in input\n\n\nc_out\nint\n\nnum of channels in output\n\n\nseq_len\nint\n\nsequence length\n\n\nd\ntuple\nNone\nshape of the output (when ndim > 1)\n\n\nk\nint\n8\nnumber of kernels per group\n\n\ng\nint\n64\nnumber of groups\n\n\nmax_c_in\nint\n8\nmax number of channels per group\n\n\nclip\nbool\nTrue\nclip values >= 0\n\n\nuse_bn\nbool\nTrue\nuse batch norm\n\n\nfc_dropout\nfloat\n0.0\ndropout probability\n\n\ncustom_head\ntyping.Any\nNone\noptional custom head as a torch.nn.Module or Callable\n\n\nzero_init\nbool\nTrue\nset head weights and biases to zero\n\n\nuse_diff\nbool\nTrue\nuse diff(X) as input\n\n\ndevice\nstr\ncpu\ndevice to use\n\n\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 20)).to(default_device())\n\nmodel = HydraPlus(5, 3, 20, d=None).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 3)\noutput.shape\n\ntorch.Size([16, 3])\n\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 20)).to(default_device())\n\nmodel = HydraPlus(5, 3, 20, d=None, use_diff=False).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 3)\noutput.shape\n\ntorch.Size([16, 3])\n\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 5, 20)).to(default_device())\n\nmodel = HydraPlus(5, 3, 20, d=20, use_diff=True).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 20, 3)\noutput.shape\n\ntorch.Size([16, 20, 3])", + "crumbs": [ + "Models", + "ROCKETs", + "HydraPlus" + ] + }, + { + "objectID": "wandb.html", + "href": "wandb.html", + "title": "Weights & Biases Sweeps", + "section": "", + "text": "Weights & Biases Sweeps are used to automate hyperparameter optimization and explore the space of possible models.\n\n\nsource\n\nwandb_agent\n\n wandb_agent (script_path, sweep, entity=None, project=None, count=None,\n run=True)\n\nRun wandb agent with sweep and `script_path\n\nsource\n\n\nupdate_run_config\n\n update_run_config (config, new_config, verbose=False)\n\nUpdate config with new_config\n\nsource\n\n\nget_sweep_config\n\n get_sweep_config (config)\n\nGet sweep config from config", + "crumbs": [ + "HPO & experiment tracking", + "Weights & Biases Sweeps" + ] + }, + { + "objectID": "models.positional_encoders.html", + "href": "models.positional_encoders.html", + "title": "Positional encoders", + "section": "", + "text": "This includes some variations of positional encoders used with Transformers.", + "crumbs": [ + "Models", + "Transformers", + "Positional encoders" + ] + }, + { + "objectID": "models.positional_encoders.html#imports", + "href": "models.positional_encoders.html#imports", + "title": "Positional encoders", + "section": "Imports", + "text": "Imports", + "crumbs": [ + "Models", + "Transformers", + "Positional encoders" + ] + }, + { + "objectID": "models.positional_encoders.html#positional-encoders", + "href": "models.positional_encoders.html#positional-encoders", + "title": "Positional encoders", + "section": "Positional encoders", + "text": "Positional encoders\n\nsource\n\nPositionalEncoding\n\n PositionalEncoding (q_len, d_model, normalize=True)\n\n\npe = PositionalEncoding(1000, 512).detach().cpu().numpy()\nplt.pcolormesh(pe, 
cmap='viridis')\nplt.title('PositionalEncoding')\nplt.colorbar()\nplt.show()\npe.mean(), pe.std(), pe.min(), pe.max(), pe.shape\n\n\n\n\n\n\n\n\n\nsource\n\n\nCoord2dPosEncoding\n\n Coord2dPosEncoding (q_len, d_model, exponential=False, normalize=True,\n eps=0.001, verbose=False)\n\n\ncpe = Coord2dPosEncoding(1000, 512, exponential=True, normalize=True).cpu().numpy()\nplt.pcolormesh(cpe, cmap='viridis')\nplt.title('Coord2dPosEncoding')\nplt.colorbar()\nplt.show()\nplt.plot(cpe.mean(0))\nplt.show()\nplt.plot(cpe.mean(1))\nplt.show()\ncpe.mean(), cpe.std(), cpe.min(), cpe.max()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nCoord1dPosEncoding\n\n Coord1dPosEncoding (q_len, exponential=False, normalize=True)\n\n\ncpe = Coord1dPosEncoding(1000, exponential=True, normalize=True).detach().cpu().numpy()\nplt.pcolormesh(cpe, cmap='viridis')\nplt.title('Coord1dPosEncoding')\nplt.colorbar()\nplt.show()\nplt.plot(cpe.mean(1))\nplt.show()\ncpe.mean(), cpe.std(), cpe.min(), cpe.max(), cpe.shape\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ncpe = Coord1dPosEncoding(1000, exponential=True, normalize=True).detach().cpu().numpy()\nplt.pcolormesh(cpe, cmap='viridis')\nplt.title('Coord1dPosEncoding')\nplt.colorbar()\nplt.show()\nplt.plot(cpe.mean(1))\nplt.show()\ncpe.mean(), cpe.std(), cpe.min(), cpe.max()", + "crumbs": [ + "Models", + "Transformers", + "Positional encoders" + ] + }, + { + "objectID": "models.minirocket.html", + "href": "models.minirocket.html", + "title": "MINIROCKET", + "section": "", + "text": "A Very Fast (Almost) Deterministic Transform for Time Series Classification.\n\n\nsource\n\nMiniRocketClassifier\n\n MiniRocketClassifier (num_features=10000, max_dilations_per_kernel=32,\n random_state=None, alphas=array([1.e-03, 1.e-02,\n 1.e-01, 1.e+00, 1.e+01, 1.e+02, 1.e+03]),\n normalize_features=True, memory=None,\n verbose=False, scoring=None, class_weight=None,\n **kwargs)\n\nTime series classification using MINIROCKET features and a linear classifier\n\nsource\n\n\nload_minirocket\n\n load_minirocket (fname, path='./models')\n\n\nsource\n\n\nMiniRocketRegressor\n\n MiniRocketRegressor (num_features=10000, max_dilations_per_kernel=32,\n random_state=None, alphas=array([1.e-03, 1.e-02,\n 1.e-01, 1.e+00, 1.e+01, 1.e+02, 1.e+03]),\n normalize_features=True, memory=None, verbose=False,\n scoring=None, **kwargs)\n\nTime series regression using MINIROCKET features and a linear regressor\n\nsource\n\n\nload_minirocket\n\n load_minirocket (fname, path='./models')\n\n\nsource\n\n\nMiniRocketVotingClassifier\n\n MiniRocketVotingClassifier (n_estimators=5, weights=None, n_jobs=-1,\n num_features=10000,\n max_dilations_per_kernel=32,\n random_state=None, alphas=array([1.e-03,\n 1.e-02, 1.e-01, 1.e+00, 1.e+01, 1.e+02,\n 1.e+03]), normalize_features=True,\n memory=None, verbose=False, scoring=None,\n class_weight=None, **kwargs)\n\nTime series classification ensemble using MINIROCKET features, a linear classifier and majority voting\n\nsource\n\n\nget_minirocket_preds\n\n get_minirocket_preds (X, fname, path='./models', model=None)\n\n\nsource\n\n\nMiniRocketVotingRegressor\n\n MiniRocketVotingRegressor (n_estimators=5, weights=None, n_jobs=-1,\n num_features=10000,\n max_dilations_per_kernel=32,\n random_state=None, alphas=array([1.e-03,\n 1.e-02, 1.e-01, 1.e+00, 1.e+01, 1.e+02,\n 1.e+03]), normalize_features=True,\n memory=None, verbose=False, scoring=None,\n **kwargs)\n\nTime series regression ensemble using MINIROCKET features, a linear regressor and a voting regressor\n\n# Univariate classification 
with sklearn-type API\ndsid = 'OliveOil'\nfname = 'MiniRocketClassifier'\nX_train, y_train, X_test, y_test = get_UCR_data(dsid)\ncls = MiniRocketClassifier()\ncls.fit(X_train, y_train)\ncls.save(fname)\npred = cls.score(X_test, y_test)\ndel cls\ncls = load_minirocket(fname)\ntest_eq(cls.score(X_test, y_test), pred)\n\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\n\n\n\n# Multivariate classification with sklearn-type API\ndsid = 'NATOPS'\nX_train, y_train, X_test, y_test = get_UCR_data(dsid)\ncls = MiniRocketClassifier()\ncls.fit(X_train, y_train)\ncls.score(X_test, y_test)\n\n0.9277777777777778\n\n\n\n# Multivariate classification with sklearn-type API\ndsid = 'NATOPS'\nX_train, y_train, X_test, y_test = get_UCR_data(dsid)\ncls = MiniRocketVotingClassifier(5)\ncls.fit(X_train, y_train)\ncls.score(X_test, y_test)\n\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\n\n\n0.9166666666666666\n\n\n\nfrom sklearn.metrics import mean_squared_error\n\n\n# Univariate regression with sklearn-type API\ndsid = 'Covid3Month'\nfname = 'MiniRocketRegressor'\nX_train, y_train, X_test, y_test = get_Monash_regression_data(dsid)\nif X_train is not None:\n rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)\n reg = MiniRocketRegressor(scoring=rmse_scorer)\n reg.fit(X_train, y_train)\n reg.save(fname)\n del reg\n reg = load_minirocket(fname)\n y_pred = reg.predict(X_test)\n print(mean_squared_error(y_test, y_pred, squared=False))\n\n0.04099244037606886\n\n\n\n# Multivariate regression with sklearn-type API\ndsid = 'AppliancesEnergy'\nX_train, y_train, X_test, y_test = get_Monash_regression_data(dsid)\nif X_train is not None:\n rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)\n reg = MiniRocketRegressor(scoring=rmse_scorer)\n reg.fit(X_train, y_train)\n reg.save(fname)\n del reg\n reg = load_minirocket(fname)\n y_pred = reg.predict(X_test)\n print(mean_squared_error(y_test, y_pred, squared=False))\n\n2.2938026879322577\n\n\n\n# Multivariate regression ensemble with sklearn-type API\nif X_train is not None:\n reg = MiniRocketVotingRegressor(5, scoring=rmse_scorer)\n reg.fit(X_train, y_train)\n y_pred = reg.predict(X_test)\n print(mean_squared_error(y_test, y_pred, squared=False))\n\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\n\n\n2.286295546348893", + "crumbs": [ + "Models", + "ROCKETs", + "MINIROCKET" + ] + }, + { + "objectID": "models.utils.html", + "href": "models.utils.html", + "title": "Model utilities", + "section": "", + "text": "Utility functions used to build PyTorch timeseries models.\n\n\nsource\n\napply_idxs\n\n apply_idxs (o, idxs)\n\nFunction to apply indices to zarr, dask and numpy arrays\n\nsource\n\n\nSeqTokenizer\n\n SeqTokenizer (c_in, embed_dim, token_size=60, norm=False)\n\nGenerates non-overlapping tokens from sub-sequences within a sequence by applying a sliding window\n\nsource\n\n\nget_embed_size\n\n get_embed_size 
(n_cat, rule='log2')\n\n\ntest_eq(get_embed_size(35), 6)\n\n\nsource\n\n\nhas_weight_or_bias\n\n has_weight_or_bias (l)\n\n\nsource\n\n\nhas_weight\n\n has_weight (l)\n\n\nsource\n\n\nhas_bias\n\n has_bias (l)\n\n\nsource\n\n\nis_conv\n\n is_conv (l)\n\n\nsource\n\n\nis_affine_layer\n\n is_affine_layer (l)\n\n\nsource\n\n\nis_conv_linear\n\n is_conv_linear (l)\n\n\nsource\n\n\nis_bn\n\n is_bn (l)\n\n\nsource\n\n\nis_linear\n\n is_linear (l)\n\n\nsource\n\n\nis_layer\n\n is_layer (*args)\n\n\nsource\n\n\nget_layers\n\n get_layers (model, cond=<function noop>, full=True)\n\n\nsource\n\n\ncheck_weight\n\n check_weight (m, cond=<function noop>, verbose=False)\n\n\nsource\n\n\ncheck_bias\n\n check_bias (m, cond=<function noop>, verbose=False)\n\n\nsource\n\n\nget_nf\n\n get_nf (m)\n\nGet nf from model’s first linear layer in head\n\nsource\n\n\nts_splitter\n\n ts_splitter (m)\n\nSplit of a model between body and head\n\nsource\n\n\ntransfer_weights\n\n transfer_weights (model, weights_path:pathlib.Path,\n device:torch.device=None, exclude_head:bool=True)\n\nUtility function that allows to easily transfer weights between models. Taken from the great self-supervised repository created by Kerem Turgutlu. https://github.com/KeremTurgutlu/self_supervised/blob/d87ebd9b4961c7da0efd6073c42782bbc61aaa2e/self_supervised/utils.py\n\nsource\n\n\nbuild_ts_model\n\n build_ts_model (arch, c_in=None, c_out=None, seq_len=None, d=None,\n dls=None, device=None, verbose=False, s_cat_idxs=None,\n s_cat_embeddings=None, s_cat_embedding_dims=None,\n s_cont_idxs=None, o_cat_idxs=None, o_cat_embeddings=None,\n o_cat_embedding_dims=None, o_cont_idxs=None,\n patch_len=None, patch_stride=None, fusion_layers=128,\n fusion_act='relu', fusion_dropout=0.0,\n fusion_use_bn=True, pretrained=False, weights_path=None,\n exclude_head=True, cut=-1, init=None, arch_config={},\n **kwargs)\n\n\nsource\n\n\ncount_parameters\n\n count_parameters (model, trainable=True)\n\n\nsource\n\n\nbuild_tsimage_model\n\n build_tsimage_model (arch, c_in=None, c_out=None, dls=None,\n pretrained=False, device=None, verbose=False,\n init=None, arch_config={}, **kwargs)\n\n\nsource\n\n\nbuild_tabular_model\n\n build_tabular_model (arch, dls, layers=None, emb_szs=None, n_out=None,\n y_range=None, device=None, arch_config={}, **kwargs)\n\n\nfrom tsai.data.external import get_UCR_data\nfrom tsai.data.core import TSCategorize, get_ts_dls\nfrom tsai.data.preprocessing import TSStandardize\nfrom tsai.models.InceptionTime import *\n\n\nX, y, splits = get_UCR_data('NATOPS', split_data=False)\ntfms = [None, TSCategorize()]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, splits, tfms=tfms, batch_tfms=batch_tfms)\nmodel = build_ts_model(InceptionTime, dls=dls)\ntest_eq(count_parameters(model), 460038)\n\n\nsource\n\n\nget_clones\n\n get_clones (module, N)\n\n\nm = nn.Conv1d(3,4,3)\nget_clones(m, 3)\n\nModuleList(\n (0-2): 3 x Conv1d(3, 4, kernel_size=(3,), stride=(1,))\n)\n\n\n\nsource\n\n\nsplit_model\n\n split_model (m)\n\n\nsource\n\n\noutput_size_calculator\n\n output_size_calculator (mod, c_in, seq_len=None)\n\n\nc_in = 3\nseq_len = 30\nm = nn.Conv1d(3, 12, kernel_size=3, stride=2)\nnew_c_in, new_seq_len = output_size_calculator(m, c_in, seq_len)\ntest_eq((new_c_in, new_seq_len), (12, 14))\n\n[W NNPACK.cpp:64] Could not initialize NNPACK! 
Reason: Unsupported hardware.\n\n\n\nsource\n\n\nchange_model_head\n\n change_model_head (model, custom_head, **kwargs)\n\nReplaces a model’s head by a custom head as long as the model has a head, head_nf, c_out and seq_len attributes\n\nsource\n\n\ntrue_forecaster\n\n true_forecaster (o, split, horizon=1)\n\n\nsource\n\n\nnaive_forecaster\n\n naive_forecaster (o, split, horizon=1)\n\n\na = np.random.rand(20).cumsum()\nsplit = np.arange(10, 20)\na, naive_forecaster(a, split, 1), true_forecaster(a, split, 1)\n\n(array([ 0.74775537, 1.41245663, 2.12445924, 2.8943163 , 3.56384351,\n 4.23789602, 4.83134182, 5.18560431, 5.30551186, 6.29076506,\n 6.58873471, 7.03661275, 7.0884361 , 7.57927022, 8.21911791,\n 8.59726773, 9.37382718, 10.17298849, 10.40118308, 10.82265631]),\n array([ 6.29076506, 6.58873471, 7.03661275, 7.0884361 , 7.57927022,\n 8.21911791, 8.59726773, 9.37382718, 10.17298849, 10.40118308]),\n array([ 6.58873471, 7.03661275, 7.0884361 , 7.57927022, 8.21911791,\n 8.59726773, 9.37382718, 10.17298849, 10.40118308, 10.82265631]))", + "crumbs": [ + "Models", + "Model utilities" + ] + }, + { + "objectID": "optuna.html", + "href": "optuna.html", + "title": "Optuna", + "section": "", + "text": "A hyperparameter optimization framework\n\nOptuna is an automatic hyperparameter optimization software framework, particularly designed for machine learning. It features an imperative, define-by-run style user API. Thanks to our define-by-run API, the code written with Optuna enjoys high modularity, and the user of Optuna can dynamically construct the search spaces for the hyperparameters.\n\ndef run_optuna_study(objective, resume=None, study_type=None, multivariate=True, search_space=None, evaluate=None, seed=None, sampler=None, pruner=None, \n study_name=None, direction='maximize', n_trials=None, timeout=None, gc_after_trial=False, show_progress_bar=True, \n save_study=True, path='optuna', show_plots=True):\n r\"\"\"Creates and runs an optuna study.\n\n Args: \n objective: A callable that implements objective function.\n resume: Path to a previously saved study.\n study_type: Type of study selected (bayesian, gridsearch, randomsearch). Based on this a sampler will be build if sampler is None. \n If a sampler is passed, this has no effect.\n multivariate: If this is True, the multivariate TPE is used when suggesting parameters. The multivariate TPE is reported to outperform \n the independent TPE.\n search_space: Search space required when running a gridsearch (if you don't pass a sampler).\n evaluate: Allows you to pass a specific set of hyperparameters that will be evaluated.\n seed: Fixed seed used by samplers.\n sampler: A sampler object that implements background algorithm for value suggestion. If None is specified, TPESampler is used during \n single-objective optimization and NSGAIISampler during multi-objective optimization. See also samplers.\n pruner: A pruner object that decides early stopping of unpromising trials. If None is specified, MedianPruner is used as the default. \n See also pruners.\n study_name: Study’s name. If this argument is set to None, a unique name is generated automatically.\n direction: A sequence of directions during multi-objective optimization.\n n_trials: The number of trials. If this argument is set to None, there is no limitation on the number of trials. If timeout is also set to \n None, the study continues to create trials until it receives a termination signal such as Ctrl+C or SIGTERM.\n timeout: Stop study after the given number of second(s). 
If this argument is set to None, the study is executed without time limitation. \n If n_trials is also set to None, the study continues to create trials until it receives a termination signal such as \n Ctrl+C or SIGTERM.\n gc_after_trial: Flag to execute garbage collection at the end of each trial. By default, garbage collection is enabled, just in case. \n You can turn it off with this argument if memory is safely managed in your objective function.\n show_progress_bar: Flag to show progress bars or not. To disable progress bar, set this False.\n save_study: Save your study when finished/ interrupted.\n path: Folder where the study will be saved.\n show_plots: Flag to control whether plots are shown at the end of the study.\n \"\"\"\n \n try: import optuna\n except ImportError: raise ImportError('You need to install optuna to use run_optuna_study')\n\n # Sampler\n if sampler is None:\n if study_type is None or \"bayes\" in study_type.lower(): \n sampler = optuna.samplers.TPESampler(seed=seed, multivariate=multivariate)\n elif \"grid\" in study_type.lower():\n assert search_space, f\"you need to pass a search_space dict to run a gridsearch\"\n sampler = optuna.samplers.GridSampler(search_space)\n elif \"random\" in study_type.lower(): \n sampler = optuna.samplers.RandomSampler(seed=seed)\n assert sampler, \"you need to either select a study type (bayesian, gridsampler, randomsampler) or pass a sampler\"\n\n # Study\n if resume: \n try:\n study = joblib.load(resume)\n except: \n print(f\"joblib.load({resume}) couldn't recover any saved study. Check the path.\")\n return\n print(\"Best trial until now:\")\n print(\" Value: \", study.best_trial.value)\n print(\" Params: \")\n for key, value in study.best_trial.params.items():\n print(f\" {key}: {value}\")\n else: \n study = optuna.create_study(sampler=sampler, pruner=pruner, study_name=study_name, direction=direction)\n if evaluate: study.enqueue_trial(evaluate)\n try:\n study.optimize(objective, n_trials=n_trials, timeout=timeout, gc_after_trial=gc_after_trial, show_progress_bar=show_progress_bar)\n except KeyboardInterrupt:\n pass\n\n # Save\n if save_study:\n full_path = Path(path)/f'{study.study_name}.pkl'\n full_path.parent.mkdir(parents=True, exist_ok=True)\n joblib.dump(study, full_path)\n print(f'\\nOptuna study saved to {full_path}')\n print(f\"To reload the study run: study = joblib.load('{full_path}')\")\n\n # Plots\n if show_plots and len(study.trials) > 1:\n try: display(optuna.visualization.plot_optimization_history(study))\n except: pass\n try: display(optuna.visualization.plot_param_importances(study))\n except: pass\n try: display(optuna.visualization.plot_slice(study))\n except: pass\n try: display(optuna.visualization.plot_parallel_coordinate(study))\n except: pass\n\n # Study stats\n try:\n pruned_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED]\n complete_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]\n print(f\"\\nStudy statistics : \")\n print(f\" Study name : {study.study_name}\")\n print(f\" # finished trials : {len(study.trials)}\")\n print(f\" # pruned trials : {len(pruned_trials)}\")\n print(f\" # complete trials : {len(complete_trials)}\")\n \n print(f\"\\nBest trial :\")\n trial = study.best_trial\n print(f\" value : {trial.value}\")\n print(f\" best_params = {trial.params}\\n\")\n except:\n print('\\nNo finished trials yet.')\n return study\n\n\nsource\n\nrun_optuna_study\n\n run_optuna_study (objective, resume=None, study_type=None,\n 
multivariate=True, search_space=None, evaluate=None,\n seed=None, sampler=None, pruner=None, study_name=None,\n direction='maximize', n_trials=None, timeout=None,\n gc_after_trial=False, show_progress_bar=True,\n save_study=True, path='optuna', show_plots=True)\n\nCreates and runs an optuna study.\nArgs: objective: A callable that implements objective function. resume: Path to a previously saved study. study_type: Type of study selected (bayesian, gridsearch, randomsearch). Based on this a sampler will be build if sampler is None. If a sampler is passed, this has no effect. multivariate: If this is True, the multivariate TPE is used when suggesting parameters. The multivariate TPE is reported to outperform the independent TPE. search_space: Search space required when running a gridsearch (if you don’t pass a sampler). evaluate: Allows you to pass a specific set of hyperparameters that will be evaluated. seed: Fixed seed used by samplers. sampler: A sampler object that implements background algorithm for value suggestion. If None is specified, TPESampler is used during single-objective optimization and NSGAIISampler during multi-objective optimization. See also samplers. pruner: A pruner object that decides early stopping of unpromising trials. If None is specified, MedianPruner is used as the default. See also pruners. study_name: Study’s name. If this argument is set to None, a unique name is generated automatically. direction: A sequence of directions during multi-objective optimization. n_trials: The number of trials. If this argument is set to None, there is no limitation on the number of trials. If timeout is also set to None, the study continues to create trials until it receives a termination signal such as Ctrl+C or SIGTERM. timeout: Stop study after the given number of second(s). If this argument is set to None, the study is executed without time limitation. If n_trials is also set to None, the study continues to create trials until it receives a termination signal such as Ctrl+C or SIGTERM. gc_after_trial: Flag to execute garbage collection at the end of each trial. By default, garbage collection is enabled, just in case. You can turn it off with this argument if memory is safely managed in your objective function. show_progress_bar: Flag to show progress bars or not. To disable progress bar, set this False. save_study: Save your study when finished/ interrupted. path: Folder where the study will be saved. show_plots: Flag to control whether plots are shown at the end of the study.", + "crumbs": [ + "HPO & experiment tracking", + "Optuna" + ] + }, + { + "objectID": "models.multirocketplus.html", + "href": "models.multirocketplus.html", + "title": "MultiRocketPlus", + "section": "", + "text": "MultiRocket: Multiple pooling operators and transformations for fast and effective time series classification.\n\nThis is a Pytorch implementation of MultiRocket developed by Malcolm McLean and Ignacio Oguiza based on:\nTan, C. W., Dempster, A., Bergmeir, C., & Webb, G. I. (2022). MultiRocket: multiple pooling operators and transformations for fast and effective time series classification. 
Data Mining and Knowledge Discovery, 36(5), 1623-1646.\nOriginal paper: https://link.springer.com/article/10.1007/s10618-022-00844-1\nOriginal repository: https://github.com/ChangWeiTan/MultiRocket\n\nsource\n\nFlatten\n\n Flatten (*args, **kwargs)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nfrom tsai.imports import default_device\n\n\no = torch.rand(2, 3, 5, 4).to(default_device()) - .3\nprint(o)\n\noutput = _LPVV(o, dim=2)\nprint(output) # Should print: torch.Size([2, 3, 4])\n\ntensor([[[[ 0.5644, -0.0509, -0.0390, 0.4091],\n [ 0.0517, -0.1471, 0.6458, 0.5593],\n [ 0.4516, -0.0821, 0.1271, 0.0592],\n [ 0.4151, 0.4376, 0.0763, 0.3780],\n [ 0.2653, -0.1817, 0.0156, 0.4993]],\n\n [[-0.0779, 0.0858, 0.1982, 0.3224],\n [ 0.1130, 0.0714, -0.1779, 0.5360],\n [-0.1848, -0.2270, -0.0925, -0.1217],\n [ 0.2820, -0.0205, -0.2777, 0.3755],\n [-0.2490, 0.2613, 0.4237, 0.4534]],\n\n [[-0.0162, 0.6368, 0.0016, 0.1467],\n [ 0.6035, -0.1365, 0.6930, 0.6943],\n [ 0.2790, 0.3818, -0.0731, 0.0167],\n [ 0.6442, 0.3443, 0.4829, -0.0944],\n [ 0.2932, 0.6952, 0.5541, 0.5946]]],\n\n\n [[[ 0.6757, 0.5740, 0.3071, 0.4400],\n [-0.2344, -0.1056, 0.4773, 0.2432],\n [ 0.2595, -0.1528, -0.0866, 0.6201],\n [ 0.0657, 0.1220, 0.4849, 0.4254],\n [ 0.3399, -0.1609, 0.3465, 0.2389]],\n\n [[-0.0765, 0.0516, 0.0028, 0.4381],\n [ 0.5212, -0.2781, -0.0896, -0.0301],\n [ 0.6857, 0.3583, 0.5869, 0.3418],\n [ 0.3002, 0.5135, 0.6011, 0.6499],\n [-0.2807, -0.2888, 0.3965, 0.6585]],\n\n [[-0.1368, 0.6677, 0.1439, 0.1434],\n [-0.1820, 0.1041, -0.1211, 0.6103],\n [ 0.5808, 0.4588, 0.4572, 0.3713],\n [ 0.2389, -0.1392, 0.1371, -0.1570],\n [ 0.2840, 0.1214, -0.0059, 0.5064]]]], device='mps:0')\ntensor([[[ 1.0000, -0.6000, 0.6000, 1.0000],\n [-0.6000, -0.2000, -0.6000, -0.2000],\n [ 0.6000, 0.2000, -0.2000, 0.2000]],\n\n [[ 0.2000, -0.6000, -0.2000, 1.0000],\n [ 0.2000, -0.2000, 0.2000, 0.2000],\n [ 0.2000, 0.2000, -0.2000, 0.2000]]], device='mps:0')\n\n\n\noutput = _MPV(o, dim=2)\nprint(output) # Should print: torch.Size([2, 3, 4])\n\ntensor([[[0.3496, 0.4376, 0.2162, 0.3810],\n [0.1975, 0.1395, 0.3109, 0.4218],\n [0.4550, 0.5145, 0.4329, 0.3631]],\n\n [[0.3352, 0.3480, 0.4040, 0.3935],\n [0.5023, 0.3078, 0.3968, 0.5221],\n [0.3679, 0.3380, 0.2460, 0.4079]]], device='mps:0')\n\n\n\noutput = _RSPV(o, dim=2)\nprint(output) # Should print: torch.Size([2, 3, 4])\n\ntensor([[[ 1.0000, -0.0270, 0.9138, 1.0000],\n [-0.1286, 0.2568, 0.0630, 0.8654],\n [ 0.9823, 0.8756, 0.9190, 0.8779]],\n\n [[ 0.7024, 0.2482, 0.8983, 1.0000],\n [ 0.6168, 0.2392, 0.8931, 0.9715],\n [ 0.5517, 0.8133, 0.7065, 0.8244]]], device='mps:0')\n\n\n\noutput = _PPV(o, dim=2)\nprint(output) # Should print: torch.Size([2, 3, 4])\n\ntensor([[[-0.3007, -1.0097, 
-0.6697, -0.2381],\n [-1.0466, -0.9316, -0.9705, -0.3738],\n [-0.2786, -0.2314, -0.3366, -0.4569]],\n\n [[-0.5574, -0.8893, -0.3883, -0.2130],\n [-0.5401, -0.8574, -0.4009, -0.1767],\n [-0.6861, -0.5149, -0.7555, -0.4102]]], device='mps:0')\n\n\n\nsource\n\n\nMultiRocketFeaturesPlus\n\n MultiRocketFeaturesPlus (c_in, seq_len, num_features=10000,\n max_dilations_per_kernel=32, kernel_size=9,\n max_num_channels=9, max_num_kernels=84,\n diff=False)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nMultiRocketBackbonePlus\n\n MultiRocketBackbonePlus (c_in, seq_len, num_features=50000,\n max_dilations_per_kernel=32, kernel_size=9,\n max_num_channels=None, max_num_kernels=84,\n use_diff=True)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nMultiRocketPlus\n\n MultiRocketPlus (c_in, c_out, seq_len, d=None, num_features=50000,\n max_dilations_per_kernel=32, kernel_size=9,\n max_num_channels=None, max_num_kernels=84, use_bn=True,\n fc_dropout=0, custom_head=None, zero_init=True,\n use_diff=True)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nfrom tsai.imports import default_device\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 20)).to(default_device())\n\nmodel = MultiRocketPlus(5, 3, 20, d=None, use_diff=True).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 3)\noutput.shape\n\ntorch.Size([16, 3])\n\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 20)).to(default_device())\n\nmodel = MultiRocketPlus(5, 3, 20, d=None, use_diff=False).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 3)\noutput.shape\n\ntorch.Size([16, 3])\n\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 5, 20)).to(default_device())\n\nmodel = MultiRocketPlus(5, 3, 20, d=20, use_diff=True).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 20, 3)\noutput.shape\n\ntorch.Size([16, 20, 3])", + "crumbs": [ + "Models", + "ROCKETs", + "MultiRocketPlus" + ] + }, + { + "objectID": "models.tabmodel.html", + "href": "models.tabmodel.html", + "title": "TabModel", + "section": "", + "text": "This is an implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on fastai’s TabularModel.\nWe built it so that it’s easy to change the head of the model, something that is particularly interesting when building hybrid models.\n\nsource\n\nTabHead\n\n TabHead (emb_szs, n_cont, c_out, layers=None, fc_dropout=None,\n y_range=None, use_bn=True, bn_final=False, lin_first=False,\n act=ReLU(inplace=True), skip=False)\n\nBasic head for tabular data.\n\nsource\n\n\nTabBackbone\n\n TabBackbone (emb_szs, n_cont, embed_p=0.0, bn_cont=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nTabModel\n\n TabModel (emb_szs, n_cont, c_out, layers=None, fc_dropout=None,\n embed_p=0.0, y_range=None, use_bn=True, 
bn_final=False,\n bn_cont=True, lin_first=False, act=ReLU(inplace=True),\n skip=False)\n\nBasic model for tabular data.\n\nfrom fastai.tabular.core import *\nfrom tsai.data.tabular import *\n\n\npath = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')\n# df['salary'] = np.random.rand(len(df)) # uncomment to simulate a cont dependent variable\nprocs = [Categorify, FillMissing, Normalize]\ncat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']\ncont_names = ['age', 'fnlwgt', 'education-num']\ny_names = ['salary']\ny_block = RegressionBlock() if isinstance(df['salary'].values[0], float) else CategoryBlock()\nsplits = RandomSplitter()(range_of(df))\npd.options.mode.chained_assignment=None\nto = TabularPandas(df, procs=procs, cat_names=cat_names, cont_names=cont_names, y_names=y_names, y_block=y_block, splits=splits, inplace=True, \n reduce_memory=False)\nto.show(5)\ntab_dls = to.dataloaders(bs=16, val_bs=32)\nb = first(tab_dls.train)\ntest_eq((b[0].shape, b[1].shape, b[2].shape), (torch.Size([16, 7]), torch.Size([16, 3]), torch.Size([16, 1])))\n\n\n\n\n\nworkclass\neducation\nmarital-status\noccupation\nrelationship\nrace\neducation-num_na\nage\nfnlwgt\neducation-num\nsalary\n\n\n\n\n20505\nPrivate\nHS-grad\nMarried-civ-spouse\nSales\nHusband\nWhite\nFalse\n47.0\n197836.0\n9.0\n<50k\n\n\n28679\nPrivate\nHS-grad\nMarried-civ-spouse\nCraft-repair\nHusband\nWhite\nFalse\n28.0\n65078.0\n9.0\n>=50k\n\n\n11669\nPrivate\nHS-grad\nNever-married\nAdm-clerical\nNot-in-family\nWhite\nFalse\n38.0\n202683.0\n9.0\n<50k\n\n\n29079\nSelf-emp-not-inc\nBachelors\nMarried-civ-spouse\nProf-specialty\nHusband\nWhite\nFalse\n41.0\n168098.0\n13.0\n<50k\n\n\n7061\nPrivate\nHS-grad\nMarried-civ-spouse\nAdm-clerical\nHusband\nWhite\nFalse\n31.0\n243442.0\n9.0\n<50k\n\n\n\n\n\n\ntab_model = build_tabular_model(TabModel, dls=tab_dls)\nb = first(tab_dls.train)\ntest_eq(tab_model.to(b[0].device)(*b[:-1]).shape, (tab_dls.bs, tab_dls.c))\nlearn = Learner(tab_dls, tab_model, splitter=ts_splitter)\np1 = count_parameters(learn.model)\nlearn.freeze()\np2 = count_parameters(learn.model)\nlearn.unfreeze()\np3 = count_parameters(learn.model)\nassert p1 == p3\nassert p1 > p2 > 0", + "crumbs": [ + "Models", + "Tabular models", + "TabModel" + ] + }, + { + "objectID": "models.hydramultirocketplus.html", + "href": "models.hydramultirocketplus.html", + "title": "HydraMultiRocketPlus", + "section": "", + "text": "Hydra: competing convolutional kernels for fast and accurate time series classification.\n\nThis is a Pytorch implementation of Hydra-MultiRocket adapted by Ignacio Oguiza and based on:\nDempster, A., Schmidt, D. F., & Webb, G. I. (2023). Hydra: Competing convolutional kernels for fast and accurate time series classification. Data Mining and Knowledge Discovery, 1-27.\nOriginal paper: https://link.springer.com/article/10.1007/s10618-023-00939-3\nOriginal repository: https://github.com/angus924/hydra\n\nsource\n\nHydraMultiRocketBackbonePlus\n\n HydraMultiRocketBackbonePlus (c_in, c_out, seq_len, d=None, k=8, g=64,\n max_c_in=8, clip=True, num_features=50000,\n max_dilations_per_kernel=32, kernel_size=9,\n max_num_channels=None, max_num_kernels=84,\n use_bn=True, fc_dropout=0,\n custom_head=None, zero_init=True,\n use_diff=True, device=device(type='cpu'))\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. 
You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nHydraMultiRocketPlus\n\n HydraMultiRocketPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,\n k:int=8, g:int=64, max_c_in:int=8, clip:bool=True,\n num_features:int=50000,\n max_dilations_per_kernel:int=32, kernel_size:int=9,\n max_num_channels:int=None, max_num_kernels:int=84,\n use_bn:bool=True, fc_dropout:float=0.0,\n custom_head:Any=None, zero_init:bool=True,\n use_diff:bool=True, device:str=device(type='cpu'))\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nnum of channels in input\n\n\nc_out\nint\n\nnum of channels in output\n\n\nseq_len\nint\n\nsequence length\n\n\nd\ntuple\nNone\nshape of the output (when ndim > 1)\n\n\nk\nint\n8\nnumber of kernels per group in HydraBackbone\n\n\ng\nint\n64\nnumber of groups in HydraBackbone\n\n\nmax_c_in\nint\n8\nmax number of channels per group in HydraBackbone\n\n\nclip\nbool\nTrue\nclip values >= 0 in HydraBackbone\n\n\nnum_features\nint\n50000\nnumber of MultiRocket features\n\n\nmax_dilations_per_kernel\nint\n32\nmax dilations per kernel in MultiRocket\n\n\nkernel_size\nint\n9\nkernel size in MultiRocket\n\n\nmax_num_channels\nint\nNone\nmax number of channels in MultiRocket\n\n\nmax_num_kernels\nint\n84\nmax number of kernels in MultiRocket\n\n\nuse_bn\nbool\nTrue\nuse batch norm\n\n\nfc_dropout\nfloat\n0.0\ndropout probability\n\n\ncustom_head\ntyping.Any\nNone\noptional custom head as a torch.nn.Module or Callable\n\n\nzero_init\nbool\nTrue\nset head weights and biases to zero\n\n\nuse_diff\nbool\nTrue\nuse diff(X) as input\n\n\ndevice\nstr\ncpu\ndevice to use\n\n\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 20)).to(default_device())\n\nmodel = HydraMultiRocketPlus(5, 3, 20, d=None).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 3)\noutput.shape\n\ntorch.Size([16, 3])\n\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 20)).to(default_device())\n\nmodel = HydraMultiRocketPlus(5, 3, 20, d=None, use_diff=False).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 3)\noutput.shape\n\ntorch.Size([16, 3])\n\n\n\nxb = torch.randn(16, 5, 20).to(default_device())\nyb = torch.randint(0, 3, (16, 5, 20)).to(default_device())\n\nmodel = HydraMultiRocketPlus(5, 3, 20, d=20, use_diff=True).to(default_device())\noutput = model(xb)\nassert output.shape == (16, 20, 3)\noutput.shape\n\ntorch.Size([16, 20, 3])", + "crumbs": [ + "Models", + "ROCKETs", + "HydraMultiRocketPlus" + ] + }, + { + "objectID": "models.tsperceiver.html", + "href": "models.tsperceiver.html", + "title": "TSPerceiver", + "section": "", + "text": "This implementation is inspired by:\nJaegle, A., Gimeno, F., Brock, A., Zisserman, A., Vinyals, O., & Carreira, J. (2021).\nPerceiver: General Perception with Iterative Attention. 
arXiv preprint arXiv:2103.03206.\nPaper: https://arxiv.org/pdf/2103.03206.pdf\nOfficial repo: Not available as of April, 2021.\n\nsource\n\nTSPerceiver\n\n TSPerceiver (c_in, c_out, seq_len, cat_szs=0, n_cont=0, n_latents=512,\n d_latent=128, d_context=None, n_layers=6,\n self_per_cross_attn=1, share_weights=True, cross_n_heads=1,\n self_n_heads=8, d_head=None, attn_dropout=0.0,\n fc_dropout=0.0, concat_pool=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nfrom tsai.basics import *\nfrom tsai.data.all import *\n\n\ndsid = 'OliveOil'\nX, y, splits = get_UCR_data(dsid, split_data=False)\nts_features_df = get_ts_features(X, y)\nts_features_df.shape\n\nFeature Extraction: 100%|██████████████████████████████████████████| 30/30 [00:00<00:00, 189.16it/s]\n\n\n(60, 11)\n\n\n\n# raw ts\ntfms = [None, [Categorize()]]\nbatch_tfms = TSStandardize(by_sample=True)\nts_dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\n\n# ts features\ncat_names = None\ncont_names = ts_features_df.columns[:-2]\ny_names = 'target'\ntab_dls = get_tabular_dls(ts_features_df, cat_names=cat_names, cont_names=cont_names, y_names=y_names, splits=splits)\n\n# mixed\nmixed_dls = get_mixed_dls(ts_dls, tab_dls)\nxb, yb = mixed_dls.one_batch()\n\n\nmodel = TSPerceiver(ts_dls.vars, ts_dls.c, ts_dls.len, cat_szs=0, \n # n_cont=0, \n n_cont=xb[1][1].shape[1], \n n_latents=128, d_latent=128, n_layers=3, self_per_cross_attn=1, share_weights=True,\n cross_n_heads=16, self_n_heads=16, d_head=None, attn_dropout=0., fc_dropout=0.).to(device)\ntest_eq(model(xb).shape, (yb.shape[0], len(np.unique(y))))", + "crumbs": [ + "Models", + "Transformers", + "TSPerceiver" + ] + }, + { + "objectID": "data.mixed.html", + "href": "data.mixed.html", + "title": "Mixed data", + "section": "", + "text": "DataLoader that can take data from multiple dataloaders with different types of data\n\n\nsource\n\nMixedDataLoaders\n\n MixedDataLoaders (*loaders, path:str|Path='.', device=None)\n\nBasic wrapper around several DataLoaders.\n\nsource\n\n\nMixedDataLoader\n\n MixedDataLoader (*loaders, path='.', shuffle=False, device=None, bs=None)\n\nAccepts any number of DataLoader and a device\n\nsource\n\n\nget_mixed_dls\n\n get_mixed_dls (*dls, device=None, shuffle_train=None, shuffle_valid=None,\n **kwargs)\n\n\nfrom tsai.data.tabular import *\n\n\npath = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')\n# df['salary'] = np.random.rand(len(df)) # uncomment to simulate a cont dependent variable\ntarget = 'salary'\nsplits = RandomSplitter()(range_of(df))\n\ncat_names = ['workclass', 'education', 'marital-status']\ncont_names = ['age', 'fnlwgt']\ndls1 = get_tabular_dls(df, cat_names=cat_names, cont_names=cont_names, y_names=target, splits=splits, bs=512)\ndls1.show_batch()\n\ncat_names = None #['occupation', 'relationship', 'race']\ncont_names = ['education-num']\ndls2 = get_tabular_dls(df, cat_names=cat_names, cont_names=cont_names, y_names=target, splits=splits, 
bs=128)\ndls2.show_batch()\n\n\n\n\n\nworkclass\neducation\nmarital-status\nage\nfnlwgt\nsalary\n\n\n\n\n0\nPrivate\nBachelors\nMarried-civ-spouse\n59.999999\n131680.999115\n>=50k\n\n\n1\nPrivate\n12th\nNever-married\n18.000000\n311795.000052\n<50k\n\n\n2\nPrivate\nHS-grad\nMarried-civ-spouse\n45.000000\n350440.002257\n>=50k\n\n\n3\nLocal-gov\nMasters\nNever-married\n44.000000\n101593.001253\n<50k\n\n\n4\n?\nSome-college\nNever-married\n20.999999\n41355.995576\n<50k\n\n\n5\nPrivate\nBachelors\nNever-married\n30.000000\n207668.000292\n<50k\n\n\n6\nFederal-gov\nBachelors\nNever-married\n28.000000\n281859.998606\n<50k\n\n\n7\n?\nSome-college\nNever-married\n20.999999\n180338.999810\n<50k\n\n\n8\nPrivate\nSome-college\nNever-married\n20.000000\n174713.999509\n<50k\n\n\n9\nSelf-emp-not-inc\nBachelors\nMarried-civ-spouse\n50.000000\n334273.005863\n<50k\n\n\n\n\n\n\n\n\n\neducation-num_na\neducation-num\nsalary\n\n\n\n\n0\nFalse\n9.0\n<50k\n\n\n1\nFalse\n9.0\n<50k\n\n\n2\nFalse\n13.0\n>=50k\n\n\n3\nFalse\n9.0\n<50k\n\n\n4\nFalse\n9.0\n<50k\n\n\n5\nFalse\n13.0\n>=50k\n\n\n6\nFalse\n10.0\n<50k\n\n\n7\nFalse\n10.0\n<50k\n\n\n8\nFalse\n13.0\n<50k\n\n\n9\nFalse\n10.0\n<50k\n\n\n\n\n\n\ndls = get_mixed_dls(dls1, dls2, bs=8)\nfirst(dls.train)\nfirst(dls.valid)\ntorch.save(dls,'export/mixed_dls.pth')\ndel dls\ndls = torch.load('export/mixed_dls.pth')\ndls.train.show_batch()\n\n\n\n\n\nworkclass\neducation\nmarital-status\nage\nfnlwgt\nsalary\n\n\n\n\n0\nState-gov\nHS-grad\nNever-married\n43.000000\n23156.998049\n<50k\n\n\n1\nPrivate\n11th\nMarried-civ-spouse\n32.000000\n140092.001434\n<50k\n\n\n2\nSelf-emp-not-inc\nHS-grad\nNever-married\n43.000000\n48086.995399\n<50k\n\n\n3\nSelf-emp-not-inc\nAssoc-acdm\nNever-married\n34.000000\n177638.999728\n<50k\n\n\n4\nLocal-gov\nMasters\nMarried-civ-spouse\n65.000001\n146453.999176\n<50k\n\n\n5\nPrivate\nHS-grad\nMarried-civ-spouse\n33.000000\n227281.999333\n<50k\n\n\n6\nPrivate\nHS-grad\nNever-married\n33.000000\n194900.999911\n<50k\n\n\n7\nPrivate\nHS-grad\nDivorced\n23.000000\n259301.002460\n<50k\n\n\n\n\n\n\n\n\n\neducation-num_na\neducation-num\nsalary\n\n\n\n\n0\nFalse\n9.0\n<50k\n\n\n1\nFalse\n7.0\n<50k\n\n\n2\nFalse\n9.0\n<50k\n\n\n3\nFalse\n12.0\n<50k\n\n\n4\nFalse\n14.0\n<50k\n\n\n5\nTrue\n10.0\n<50k\n\n\n6\nFalse\n9.0\n<50k\n\n\n7\nFalse\n9.0\n<50k\n\n\n\n\n\n\nxb, yb = first(dls.train)\nxb\n\n((tensor([[ 8, 12, 5],\n [ 5, 2, 3],\n [ 7, 12, 5],\n [ 7, 8, 5],\n [ 3, 13, 3],\n [ 5, 12, 3],\n [ 5, 12, 5],\n [ 5, 12, 1]]),\n tensor([[ 0.3222, -1.5782],\n [-0.4850, -0.4696],\n [ 0.3222, -1.3418],\n [-0.3383, -0.1136],\n [ 1.9368, -0.4093],\n [-0.4117, 0.3570],\n [-0.4117, 0.0500],\n [-1.1455, 0.6606]])),\n (tensor([[1],\n [1],\n [1],\n [1],\n [1],\n [2],\n [1],\n [1]]),\n tensor([[-0.4258],\n [-1.2097],\n [-0.4258],\n [ 0.7502],\n [ 1.5342],\n [-0.0338],\n [-0.4258],\n [-0.4258]])))\n\n\n\nxs, ys = first(dls.train)\nxs[0][0].shape, xs[0][1].shape, xs[1][0].shape, xs[1][1].shape\n\n(torch.Size([8, 3]),\n torch.Size([8, 2]),\n torch.Size([8, 1]),\n torch.Size([8, 1]))\n\n\n\nfrom tsai.data.validation import TimeSplitter\nfrom tsai.data.core import TSRegression, get_ts_dls\n\n\nX = np.repeat(np.repeat(np.arange(8)[:, None, None], 2, 1), 5, 2).astype(float)\nX = np.concatenate([X, X])\ny = np.concatenate([np.arange(len(X)//2)]*2)\nalphabet = np.array(list(string.ascii_lowercase))\n# y = alphabet[y]\nsplits = TimeSplitter(.5, show_plot=False)(range_of(X))\ntfms = [None, TSRegression()]\ndls1 = get_ts_dls(X, y, splits=splits, 
tfms=tfms)\ndls1.one_batch()\n\n(TSTensor(samples:8, vars:2, len:5, device=cpu, dtype=torch.float32),\n tensor([7., 0., 2., 1., 5., 4., 3., 6.]))\n\n\n\ndata = np.concatenate([np.repeat(np.arange(8)[:, None], 3, 1)*np.array([1, 10, 100])]*2)\ndf = pd.DataFrame(data, columns=['cat1', 'cat2', 'cont'])\ndf['cont'] = df['cont'].astype(float)\ndf['target'] = y\ncat_names = ['cat1', 'cat2']\ncont_names = ['cont']\ntarget = 'target'\ndls2 = get_tabular_dls(df, procs=[Categorify, FillMissing, #Normalize\n ], cat_names=cat_names, cont_names=cont_names, y_names=target, splits=splits, bs=8)\ndls2.one_batch()\n\n(tensor([[2, 2],\n [5, 5],\n [1, 1],\n [7, 7],\n [3, 3],\n [6, 6],\n [8, 8],\n [4, 4]]),\n tensor([[100.],\n [400.],\n [ 0.],\n [600.],\n [200.],\n [500.],\n [700.],\n [300.]]),\n tensor([[1],\n [4],\n [0],\n [6],\n [2],\n [5],\n [7],\n [3]], dtype=torch.int8))\n\n\n\nz = zip(_loaders[dls1.train.fake_l.num_workers == 0](dls1.train.fake_l))\nfor b in z: \n print(b)\n break\n\n((TSTensor(samples:8, vars:2, len:5, device=cpu, dtype=torch.float32), tensor([7., 0., 2., 1., 5., 4., 3., 6.])),)\n\n\n\nbs = 8\ndls = get_mixed_dls(dls1, dls2, bs=bs)\ndl = dls.train\nxb, yb = dl.one_batch()\ntest_eq(len(xb), 2)\ntest_eq(len(xb[0]), bs)\ntest_eq(len(xb[1]), 2)\ntest_eq(len(xb[1][0]), bs)\ntest_eq(len(xb[1][1]), bs)\ntest_eq(xb[0].data[:, 0, 0].long(), xb[1][0][:, 0] - 1) # categorical data and ts are in synch\ntest_eq(xb[0].data[:, 0, 0], (xb[1][1]/100).flatten()) # continuous data and ts are in synch\ntest_eq(tensor(dl.input_idxs), yb.long().cpu())\ndl = dls.valid\nxb, yb = dl.one_batch()\ntest_eq(tensor(y[dl.input_idxs]), yb.long().cpu())", + "crumbs": [ + "Data", + "Mixed data" + ] + }, + { + "objectID": "data.external.html", + "href": "data.external.html", + "title": "External data", + "section": "", + "text": "Helper functions used to download and extract common time series datasets.\n\n\nsource\n\ndecompress_from_url\n\n decompress_from_url (url, target_dir=None, verbose=False)\n\n\nsource\n\n\ndownload_data\n\n download_data (url, fname=None, c_key='archive', force_download=False,\n timeout=4, verbose=False)\n\nDownload url to fname.\n\nsource\n\n\nget_UCR_univariate_list\n\n get_UCR_univariate_list ()\n\n\nsource\n\n\nget_UCR_multivariate_list\n\n get_UCR_multivariate_list ()\n\n\nsource\n\n\nget_UCR_data\n\n get_UCR_data (dsid, path='.', parent_dir='data/UCR', on_disk=True,\n mode='c', Xdtype='float32', ydtype=None, return_split=True,\n split_data=True, force_download=False, verbose=False)\n\n\nfrom fastai.data.transforms import get_files\n\n\nPATH = Path('.')\ndsids = ['ECGFiveDays', 'AtrialFibrillation'] # univariate and multivariate\nfor dsid in dsids:\n print(dsid)\n tgt_dir = PATH/f'data/UCR/{dsid}'\n if os.path.isdir(tgt_dir): shutil.rmtree(tgt_dir)\n test_eq(len(get_files(tgt_dir)), 0) # no file left\n X_train, y_train, X_valid, y_valid = get_UCR_data(dsid)\n test_eq(len(get_files(tgt_dir, '.npy')), 6)\n test_eq(len(get_files(tgt_dir, '.npy')), len(get_files(tgt_dir))) # test no left file/ dir\n del X_train, y_train, X_valid, y_valid\n X_train, y_train, X_valid, y_valid = get_UCR_data(dsid)\n test_eq(X_train.ndim, 3)\n test_eq(y_train.ndim, 1)\n test_eq(X_valid.ndim, 3)\n test_eq(y_valid.ndim, 1)\n test_eq(len(get_files(tgt_dir, '.npy')), 6)\n test_eq(len(get_files(tgt_dir, '.npy')), len(get_files(tgt_dir))) # test no left file/ dir\n test_eq(X_train.ndim, 3)\n test_eq(y_train.ndim, 1)\n test_eq(X_valid.ndim, 3)\n test_eq(y_valid.ndim, 1)\n test_eq(X_train.dtype, np.float32)\n 
test_eq(X_train.__class__.__name__, 'memmap')\n del X_train, y_train, X_valid, y_valid\n X_train, y_train, X_valid, y_valid = get_UCR_data(dsid, on_disk=False)\n test_eq(X_train.__class__.__name__, 'ndarray')\n del X_train, y_train, X_valid, y_valid\n\nECGFiveDays\nAtrialFibrillation\n\n\n\nX_train, y_train, X_valid, y_valid = get_UCR_data('natops')\n\n\ndsid = 'natops' \nX_train, y_train, X_valid, y_valid = get_UCR_data(dsid, verbose=True)\nX, y, splits = get_UCR_data(dsid, split_data=False)\ntest_eq(X[splits[0]], X_train)\ntest_eq(y[splits[1]], y_valid)\ntest_eq(X[splits[0]], X_train)\ntest_eq(y[splits[1]], y_valid)\ntest_type(X, X_train)\ntest_type(y, y_train)\n\nDataset: NATOPS\nX_train: (180, 24, 51)\ny_train: (180,)\nX_valid: (180, 24, 51)\ny_valid: (180,) \n\n\n\n\nsource\n\n\ncheck_data\n\n check_data (X, y=None, splits=None, show_plot=True)\n\n\ndsid = 'ECGFiveDays'\nX, y, splits = get_UCR_data(dsid, split_data=False, on_disk=False, force_download=False)\ncheck_data(X, y, splits)\ncheck_data(X[:, 0], y, splits)\ny = y.astype(np.float32)\ncheck_data(X, y, splits)\ny[:10] = np.nan\ncheck_data(X[:, 0], y, splits)\nX, y, splits = get_UCR_data(dsid, split_data=False, on_disk=False, force_download=False)\nsplits = get_splits(y, 3)\ncheck_data(X, y, splits)\ncheck_data(X[:, 0], y, splits)\ny[:5]= np.nan\ncheck_data(X[:, 0], y, splits)\nX, y, splits = get_UCR_data(dsid, split_data=False, on_disk=False, force_download=False)\n\nX - shape: [884 samples x 1 features x 136 timesteps] type: ndarray dtype:float32 isnan: 0\ny - shape: (884,) type: ndarray dtype:<U1 n_classes: 2 (442 samples per class) ['1', '2'] isnan: False\nsplits - n_splits: 2 shape: [23, 861] overlap: False\nX - shape: (884, 136) type: ndarray dtype:float32 isnan: 0\ny - shape: (884,) type: ndarray dtype:<U1 n_classes: 2 (442 samples per class) ['1', '2'] isnan: False\nsplits - n_splits: 2 shape: [23, 861] overlap: False\nX - shape: [884 samples x 1 features x 136 timesteps] type: ndarray dtype:float32 isnan: 0\ny - shape: (884,) type: ndarray dtype:float32 isnan: 0\nsplits - n_splits: 2 shape: [23, 861] overlap: False\nX - shape: (884, 136) type: ndarray dtype:float32 isnan: 0\ny - shape: (884,) type: ndarray dtype:float32 isnan: 10\nsplits - n_splits: 2 shape: [23, 861] overlap: False\nX - shape: [884 samples x 1 features x 136 timesteps] type: ndarray dtype:float32 isnan: 0\ny - shape: (884,) type: ndarray dtype:<U1 n_classes: 2 (442 samples per class) ['1', '2'] isnan: False\nsplits - n_splits: 3 shape: [[589, 295], [589, 295], [590, 294]] overlap: [False, False, False]\nX - shape: (884, 136) type: ndarray dtype:float32 isnan: 0\ny - shape: (884,) type: ndarray dtype:<U1 n_classes: 2 (442 samples per class) ['1', '2'] isnan: False\nsplits - n_splits: 3 shape: [[589, 295], [589, 295], [590, 294]] overlap: [False, False, False]\nX - shape: (884, 136) type: ndarray dtype:float32 isnan: 0\ny - shape: (884,) type: ndarray dtype:<U1 n_classes: 3 (294 samples per class) ['1', '2', 'n'] isnan: False\nsplits - n_splits: 3 shape: [[589, 295], [589, 295], [590, 294]] overlap: [False, False, False]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n/var/folders/42/4hhwknbd5kzcbq48tmy_gbp00000gn/T/ipykernel_70492/278801922.py:23: UserWarning: y contains nan values\n warnings.warn('y contains nan values')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nget_Monash_regression_list\n\n get_Monash_regression_list ()\n\n\nsource\n\n\nget_Monash_regression_data\n\n get_Monash_regression_data (dsid, 
path='./data/Monash', on_disk=True,\n mode='c', Xdtype='float32', ydtype=None,\n split_data=True, force_download=False,\n verbose=False, timeout=4)\n\n\ndsid = \"Covid3Month\"\nX_train, y_train, X_valid, y_valid = get_Monash_regression_data(dsid, on_disk=False, split_data=True, force_download=False)\nX, y, splits = get_Monash_regression_data(dsid, on_disk=True, split_data=False, force_download=False, verbose=True)\nif X_train is not None: \n test_eq(X_train.shape, (140, 1, 84))\nif X is not None: \n test_eq(X.shape, (201, 1, 84))\n\nDataset: Covid3Month\nX : (201, 1, 84)\ny : (201,)\nsplits : (#140) [0,1,2,3,4,5,6,7,8,9...] (#61) [140,141,142,143,144,145,146,147,148,149...] \n\n\n\n\nsource\n\n\nget_forecasting_list\n\n get_forecasting_list ()\n\n\nsource\n\n\nget_forecasting_time_series\n\n get_forecasting_time_series (dsid, path='./data/forecasting/',\n force_download=False, verbose=True,\n **kwargs)\n\n\nts = get_forecasting_time_series(\"sunspots\", force_download=False)\ntest_eq(len(ts), 2820)\nts\n\nDataset: Sunspots\ndownloading data...\n...done. Path = data/forecasting/Sunspots.csv\n\n\n\n\n\n\n\n\n\nSunspots\n\n\nMonth\n\n\n\n\n\n1749-01-31\n58.0\n\n\n1749-02-28\n62.6\n\n\n1749-03-31\n70.0\n\n\n1749-04-30\n55.7\n\n\n1749-05-31\n85.0\n\n\n...\n...\n\n\n1983-08-31\n71.8\n\n\n1983-09-30\n50.3\n\n\n1983-10-31\n55.8\n\n\n1983-11-30\n33.3\n\n\n1983-12-31\n33.4\n\n\n\n\n2820 rows × 1 columns\n\n\n\n\nts = get_forecasting_time_series(\"weather\", force_download=False)\nif ts is not None: \n test_eq(len(ts), 70091)\n display(ts)\n\nDataset: Weather\ndownloading data...\n...done. Path = data/forecasting/Weather.csv.zip\n\n\n\n\n\n\n\n\n\np (mbar)\nT (degC)\nTpot (K)\nTdew (degC)\nrh (%)\nVPmax (mbar)\nVPact (mbar)\nVPdef (mbar)\nsh (g/kg)\nH2OC (mmol/mol)\nrho (g/m**3)\nWx\nWy\nmax Wx\nmax Wy\nDay sin\nDay cos\nYear sin\nYear 
cos\n\n\n\n\n0\n996.50\n-8.05\n265.38\n-8.78\n94.40\n3.33\n3.14\n0.19\n1.96\n3.15\n1307.86\n-0.204862\n-0.046168\n-0.614587\n-0.138503\n-1.776611e-12\n1.000000\n0.009332\n0.999956\n\n\n1\n996.62\n-8.88\n264.54\n-9.77\n93.20\n3.12\n2.90\n0.21\n1.81\n2.91\n1312.25\n-0.245971\n-0.044701\n-0.619848\n-0.112645\n2.588190e-01\n0.965926\n0.010049\n0.999950\n\n\n2\n996.84\n-8.81\n264.59\n-9.66\n93.50\n3.13\n2.93\n0.20\n1.83\n2.94\n1312.18\n-0.175527\n0.039879\n-0.614344\n0.139576\n5.000000e-01\n0.866025\n0.010766\n0.999942\n\n\n3\n996.99\n-9.05\n264.34\n-10.02\n92.60\n3.07\n2.85\n0.23\n1.78\n2.85\n1313.61\n-0.050000\n-0.086603\n-0.190000\n-0.329090\n7.071068e-01\n0.707107\n0.011483\n0.999934\n\n\n4\n997.46\n-9.63\n263.72\n-10.65\n92.20\n2.94\n2.71\n0.23\n1.69\n2.71\n1317.19\n-0.368202\n0.156292\n-0.810044\n0.343843\n8.660254e-01\n0.500000\n0.012199\n0.999926\n\n\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n\n\n70086\n1002.18\n-0.98\n272.01\n-5.36\n72.00\n5.69\n4.09\n1.59\n2.54\n4.08\n1280.70\n-0.855154\n-0.160038\n-1.336792\n-0.250174\n-9.990482e-01\n0.043619\n0.006183\n0.999981\n\n\n70087\n1001.40\n-1.40\n271.66\n-6.84\n66.29\n5.51\n3.65\n1.86\n2.27\n3.65\n1281.87\n-0.716196\n-0.726267\n-1.348134\n-1.367090\n-9.537170e-01\n0.300706\n0.006900\n0.999976\n\n\n70088\n1001.19\n-2.75\n270.32\n-6.90\n72.90\n4.99\n3.64\n1.35\n2.26\n3.63\n1288.02\n-0.661501\n0.257908\n-1.453438\n0.566672\n-8.433914e-01\n0.537300\n0.007617\n0.999971\n\n\n70089\n1000.65\n-2.89\n270.22\n-7.15\n72.30\n4.93\n3.57\n1.37\n2.22\n3.57\n1288.03\n-0.280621\n-0.209169\n-0.545207\n-0.406385\n-6.755902e-01\n0.737277\n0.008334\n0.999965\n\n\n70090\n1000.11\n-3.93\n269.23\n-8.09\n72.60\n4.56\n3.31\n1.25\n2.06\n3.31\n1292.41\n-0.516998\n-0.215205\n-0.923210\n-0.384295\n-4.617486e-01\n0.887011\n0.009050\n0.999959\n\n\n\n\n70091 rows × 19 columns\n\n\n\n\nsource\n\n\nconvert_tsf_to_dataframe\n\n convert_tsf_to_dataframe (full_file_path_and_name,\n replace_missing_vals_with='NaN',\n value_column_name='series_value')\n\n\nsource\n\n\nget_Monash_forecasting_data\n\n get_Monash_forecasting_data (dsid, path='./data/forecasting/',\n force_download=False,\n remove_from_disk=False, add_timestamp=True,\n verbose=True)\n\n\nsource\n\n\nget_fcst_horizon\n\n get_fcst_horizon (frequency, dsid)\n\n\nsource\n\n\npreprocess_Monash_df\n\n preprocess_Monash_df (df, frequency)\n\n\ndsid = 'covid_deaths_dataset'\ndf = get_Monash_forecasting_data(dsid, force_download=False)\nif df is not None: \n test_eq(df.shape, (56392, 3))\n\nDataset: covid_deaths_dataset\ndownloading data...\n...data downloaded\ndecompressing data...\n...data decompressed\nconverting data to dataframe...\n...done\n\nfreq : daily\nforecast_horizon : 30\ncontain_missing_values : False\ncontain_equal_length : True\n\nexploding dataframe...\n...done\n\n\ndata.shape: (56392, 3)\n\n\n\nsource\n\n\ndownload_all_long_term_forecasting_data\n\n download_all_long_term_forecasting_data\n (target_dir='./data/long_forecas\n ting/', force_download=False,\n remove_zip=False,\n c_key='archive', timeout=4,\n verbose=True)\n\n\nsource\n\n\nunzip_file\n\n unzip_file (file, target_dir)\n\n\nsource\n\n\nget_long_term_forecasting_data\n\n get_long_term_forecasting_data (dsid,\n target_dir='./data/long_forecasting/',\n task='M', fcst_horizon=None,\n fcst_history=None, preprocess=True,\n force_download=False, remove_zip=False,\n return_df=True, show_plot=True,\n dtype=<class 'numpy.float32'>,\n verbose=True, **kwargs)\n\nDownloads (and preprocess) a pandas 
dataframe with the requested long-term forecasting dataset\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndsid\n\n\nID of the dataset to be used for long-term forecasting.\n\n\ntarget_dir\nstr\n./data/long_forecasting/\nDirectory where the long-term forecasting data will be saved.\n\n\ntask\nstr\nM\n‘M’ for multivariate, ‘S’ for univariate and ‘MS’ for multivariate input with univariate output\n\n\nfcst_horizon\nNoneType\nNone\nNumber of steps forecasted into the future. If None, the default is applied.\n\n\nfcst_history\nNoneType\nNone\nNumber of historical steps used as input. If None, the minimum default is applied.\n\n\npreprocess\nbool\nTrue\nFlag that indicates whether the data is preprocessed before saving.\n\n\nforce_download\nbool\nFalse\nFlag that indicates if the data should be downloaded again even if the directory exists.\n\n\nremove_zip\nbool\nFalse\nFlag that indicates if the zip file should be removed after extracting the data.\n\n\nreturn_df\nbool\nTrue\nFlag that indicates whether a dataframe (True) or X and y arrays (False) are returned.\n\n\nshow_plot\nbool\nTrue\nplot the splits\n\n\ndtype\ntype\nfloat32\n\n\n\nverbose\nbool\nTrue\nFlag to indicate the verbosity.\n\n\nkwargs\n\n\n\n\n\n\n\ndsid = \"ILI\"\ntry:\n df = get_long_term_forecasting_data(dsid, target_dir='./data/forecasting/', force_download=False)\n print(f\"{dsid:15}: {str(df.shape):15}\")\n del df; gc.collect()\n remove_dir('./data/forecasting/', False)\nexcept Exception as e:\n print(f\"{dsid:15}: {str(e):15}\")\n\n\n\n\n\n\n \n \n 100.01% [54001664/53995526 00:09<00:00]\n \n \n\n\n/Users/nacho/opt/anaconda3/envs/py39t20/lib/python3.9/site-packages/fastai/tabular/core.py:23: UserWarning: The argument 'infer_datetime_format' is deprecated and will be removed in a future version. A strict version of it is now the default, see https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. You can safely remove this argument.\n df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)\n\n\nILI : (966, 8) \n\n\n\ndsid = \"ILI\"\ntry:\n X, y, splits, stats = get_long_term_forecasting_data(dsid, target_dir='./data/forecasting/', force_download=False, return_df=False, show_plot=False)\n print(f\"{dsid:15} - X.shape: {str(X.shape):20} y.shape: {str(y.shape):20} splits: {str([len(s) for s in splits]):25} \\\nstats: {str([s.shape for s in stats]):30}\")\n del X, y, splits, stats\n gc.collect()\n remove_dir('./data/forecasting/', False)\nexcept Exception as e:\n print(f\"{dsid:15}: {str(e):15}\")\n\n\n\n\n\n\n \n \n 100.01% [54001664/53995526 00:09<00:00]\n \n \n\n\n/Users/nacho/opt/anaconda3/envs/py39t20/lib/python3.9/site-packages/fastai/tabular/core.py:23: UserWarning: The argument 'infer_datetime_format' is deprecated and will be removed in a future version. A strict version of it is now the default, see https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. 
You can safely remove this argument.\n df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)\n\n\nILI - X.shape: (839, 7, 104) y.shape: (839, 7, 24) splits: [549, 74, 170] stats: [(1, 7, 1), (1, 7, 1)]", + "crumbs": [ + "Data", + "External data" + ] + }, + { + "objectID": "metrics.html", + "href": "metrics.html", + "title": "Metrics", + "section": "", + "text": "Metrics not included in fastai.\n\n\nsource\n\nMatthewsCorrCoefBinary\n\n MatthewsCorrCoefBinary (sample_weight=None)\n\nMatthews correlation coefficient for single-label classification problems\n\nsource\n\n\nget_task_metrics\n\n get_task_metrics (dls, binary_metrics=None, multi_class_metrics=None,\n regression_metrics=None, verbose=True)\n\nAll metrics applicable to multi classification have been created by Doug Williams (https://github.com/williamsdoug). Thanks a lot Doug!!\n\nsource\n\n\nF1_multi\n\n F1_multi (*args, **kwargs)\n\n\nsource\n\n\nFbeta_multi\n\n Fbeta_multi (inp, targ, beta=1.0, thresh=0.5, sigmoid=True)\n\nComputes Fbeta when inp and targ are the same size.\n\nsource\n\n\nbalanced_accuracy_multi\n\n balanced_accuracy_multi (inp, targ, thresh=0.5, sigmoid=True)\n\nComputes balanced accuracy when inp and targ are the same size.\n\nsource\n\n\nspecificity_multi\n\n specificity_multi (inp, targ, thresh=0.5, sigmoid=True)\n\nComputes specificity (true negative rate) when inp and targ are the same size.\n\nsource\n\n\nrecall_multi\n\n recall_multi (inp, targ, thresh=0.5, sigmoid=True)\n\nComputes recall when inp and targ are the same size.\n\nsource\n\n\nprecision_multi\n\n precision_multi (inp, targ, thresh=0.5, sigmoid=True)\n\nComputes precision when inp and targ are the same size.\n\nsource\n\n\nmetrics_multi_common\n\n metrics_multi_common (inp, targ, thresh=0.5, sigmoid=True,\n by_sample=False)\n\nComputes TP, TN, FP, FN when inp and targ are the same size.\n\nsource\n\n\naccuracy_multi\n\n accuracy_multi (inp, targ, thresh=0.5, sigmoid=True, by_sample=False)\n\nComputes accuracy when inp and targ are the same size.\n\nsource\n\n\nmae\n\n mae (inp, targ)\n\nMean absolute error between inp and targ.\n\nsource\n\n\nmape\n\n mape (inp, targ)\n\nMean absolute percentage error between inp and targ.\n\nn_classes = 4\ninp = torch.normal(0, 1, (16, 20, n_classes))\ntarg = torch.randint(0, n_classes, (16, 20)).to(torch.int8)\n_mAP(inp, targ)\n\n0.27493315845795063", + "crumbs": [ + "Training", + "Metrics" + ] + }, + { + "objectID": "models.xresnet1dplus.html", + "href": "models.xresnet1dplus.html", + "title": "XResNet1dPlus", + "section": "", + "text": "This is a modified version of fastai’s XResNet model in github\n\n\nsource\n\nXResNet1dPlus\n\n XResNet1dPlus (block=<class 'tsai.models.layers.ResBlock1dPlus'>,\n expansion=4, layers=[3, 4, 6, 3], fc_dropout=0.0, c_in=3,\n c_out=None, n_out=1000, seq_len=None, stem_szs=(32, 32,\n 64), widen=1.0, sa=False, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ks=3, stride=2,\n coord=False, custom_head=None, block_szs_base=(64, 128,\n 256, 512), groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sym=False, norm='Batch', zero_norm=True,\n pool=<function AvgPool>, pool_first=True)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nxresnet1d50_deeperplus\n\n xresnet1d50_deeperplus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2,\n ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None,\n dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d34_deeperplus\n\n xresnet1d34_deeperplus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2,\n ks=3, pool=<function AvgPool>, pool_first=True,\n 
padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None,\n dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d18_deeperplus\n\n xresnet1d18_deeperplus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2,\n ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None,\n dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d50_deepplus\n\n xresnet1d50_deepplus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, 
dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d34_deepplus\n\n xresnet1d34_deepplus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d18_deepplus\n\n xresnet1d18_deepplus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, 
dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d152plus\n\n xresnet1d152plus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None, dw=False,\n g2=1, sa=False, sym=False, norm_type=<NormType.Batch:\n 1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,\n ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d101plus\n\n xresnet1d101plus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None, dw=False,\n g2=1, sa=False, sym=False, norm_type=<NormType.Batch:\n 1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,\n ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, 
dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d50plus\n\n xresnet1d50plus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,\n reduction=None, nh1=None, nh2=None, dw=False, g2=1,\n sa=False, sym=False, norm_type=<NormType.Batch: 1>,\n act_cls=<class 'torch.nn.modules.activation.ReLU'>,\n ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d34plus\n\n xresnet1d34plus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,\n reduction=None, nh1=None, nh2=None, dw=False, g2=1,\n sa=False, sym=False, norm_type=<NormType.Batch: 1>,\n act_cls=<class 'torch.nn.modules.activation.ReLU'>,\n ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, 
dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d18plus\n\n xresnet1d18plus (c_in, c_out, seq_len=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,\n reduction=None, nh1=None, nh2=None, dw=False, g2=1,\n sa=False, sym=False, norm_type=<NormType.Batch: 1>,\n act_cls=<class 'torch.nn.modules.activation.ReLU'>,\n ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nseq_len\nNoneType\nNone\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nnet = xresnet1d18plus(3, 2, coord=True)\nx = torch.rand(32, 3, 50)\nnet(x)\n\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [2, 2, 2, 2]\n\n\nTensorBase([[ 0.1829, 0.3597],\n [ 0.0274, -0.1443],\n [ 0.0240, -0.2374],\n [-0.1323, -0.6574],\n [ 0.1481, -0.1438],\n [ 0.2410, -0.1225],\n [-0.1186, -0.1978],\n [-0.0640, -0.4547],\n [-0.0229, -0.3214],\n [ 0.2336, -0.4466],\n [-0.1843, -0.0934],\n [-0.0416, 0.1997],\n [-0.0109, -0.0253],\n [ 0.3014, -0.2193],\n [ 0.0966, 0.0602],\n [ 0.2364, 0.2209],\n [-0.1437, -0.1476],\n [ 0.0070, -0.2900],\n [ 0.2807, 0.4797],\n [-0.2386, -0.1563],\n [ 0.1620, -0.2285],\n [ 0.0479, -0.2348],\n [ 0.1573, -0.4420],\n [-0.5469, 0.1512],\n [ 0.0243, -0.1806],\n [ 0.3396, 0.1434],\n [ 0.0666, -0.1644],\n [ 0.3286, -0.5637],\n [ 0.0993, -0.6281],\n [-0.1068, -0.0763],\n [-0.2713, 0.1946],\n [-0.1416, -0.4043]], grad_fn=<AliasBackward0>)\n\n\n\nbs, c_in, seq_len = 2, 4, 32\nc_out = 2\nx = torch.rand(bs, c_in, seq_len)\narchs = [\n xresnet1d18plus, xresnet1d34plus, xresnet1d50plus, \n xresnet1d18_deepplus, xresnet1d34_deepplus, xresnet1d50_deepplus, xresnet1d18_deeperplus,\n xresnet1d34_deeperplus, xresnet1d50_deeperplus\n# # Long 
test\n# xresnet1d101, xresnet1d152,\n]\nfor i, arch in enumerate(archs):\n print(i, arch.__name__)\n test_eq(arch(c_in, c_out, sa=True, act=Mish, coord=True)(x).shape, (bs, c_out))\n\n0 xresnet1d18plus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [2, 2, 2, 2]\n1 xresnet1d34plus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [3, 4, 6, 3]\n2 xresnet1d50plus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 4 layers [3, 4, 6, 3]\n3 xresnet1d18_deepplus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [2, 2, 2, 2, 1, 1]\n4 xresnet1d34_deepplus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [3, 4, 6, 3, 1, 1]\n5 xresnet1d50_deepplus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 4 layers [3, 4, 6, 3, 1, 1]\n6 xresnet1d18_deeperplus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [2, 2, 1, 1, 1, 1, 1, 1]\n7 xresnet1d34_deeperplus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [3, 4, 6, 3, 1, 1, 1, 1]\n8 xresnet1d50_deeperplus\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 4 layers [3, 4, 6, 3, 1, 1, 1, 1]\n\n\n\nm = xresnet1d34plus(4, 2, act=Mish)\ntest_eq(len(get_layers(m, is_bn)), 38)\ntest_eq(check_weight(m, is_bn)[0].sum(), 22)\n\nblock <class 'tsai.models.layers.ResBlock1dPlus'> expansion 1 layers [3, 4, 6, 3]", + "crumbs": [ + "Models", + "CNNs", + "XResNet1dPlus" + ] + }, + { + "objectID": "models.rnn_fcn.html", + "href": "models.rnn_fcn.html", + "title": "RNN_FCN", + "section": "", + "text": "This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co\n\n\nsource\n\nMGRU_FCN\n\n MGRU_FCN (*args, se=16, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nMLSTM_FCN\n\n MLSTM_FCN (*args, se=16, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nMRNN_FCN\n\n MRNN_FCN (*args, se=16, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nGRU_FCN\n\n GRU_FCN (c_in, c_out, seq_len=None, hidden_size=100, rnn_layers=1,\n bias=True, cell_dropout=0, rnn_dropout=0.8, bidirectional=False,\n shuffle=True, fc_dropout=0.0, conv_layers=[128, 256, 128],\n kss=[7, 5, 3], se=0)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nLSTM_FCN\n\n LSTM_FCN (c_in, c_out, seq_len=None, hidden_size=100, rnn_layers=1,\n bias=True, cell_dropout=0, rnn_dropout=0.8,\n bidirectional=False, shuffle=True, fc_dropout=0.0,\n conv_layers=[128, 256, 128], kss=[7, 5, 3], se=0)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nRNN_FCN\n\n RNN_FCN (c_in, c_out, seq_len=None, hidden_size=100, rnn_layers=1,\n bias=True, cell_dropout=0, rnn_dropout=0.8, bidirectional=False,\n shuffle=True, fc_dropout=0.0, conv_layers=[128, 256, 128],\n kss=[7, 5, 3], se=0)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 16\nn_vars = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\ntest_eq(RNN_FCN(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])\ntest_eq(LSTM_FCN(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])\ntest_eq(MLSTM_FCN(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])\ntest_eq(GRU_FCN(n_vars, c_out, shuffle=False)(xb).shape, [bs, c_out])\ntest_eq(GRU_FCN(n_vars, c_out, seq_len, shuffle=False)(xb).shape, [bs, c_out])\n\n\nLSTM_FCN(n_vars, seq_len, c_out, se=8)\n\nLSTM_FCN(\n 
(rnn): LSTM(2, 100, batch_first=True)\n (rnn_dropout): Dropout(p=0.8, inplace=False)\n (convblock1): ConvBlock(\n (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (se1): SqueezeExciteBlock(\n (avg_pool): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Flatten(full=False)\n )\n (fc): Sequential(\n (0): Linear(in_features=128, out_features=16, bias=False)\n (1): ReLU()\n (2): Linear(in_features=16, out_features=128, bias=False)\n (3): Sigmoid()\n )\n )\n (convblock2): ConvBlock(\n (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)\n (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (se2): SqueezeExciteBlock(\n (avg_pool): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Flatten(full=False)\n )\n (fc): Sequential(\n (0): Linear(in_features=256, out_features=32, bias=False)\n (1): ReLU()\n (2): Linear(in_features=32, out_features=256, bias=False)\n (3): Sigmoid()\n )\n )\n (convblock3): ConvBlock(\n (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (gap): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Flatten(full=False)\n )\n (concat): Concat(dim=1)\n (fc): Linear(in_features=228, out_features=12, bias=True)\n)", + "crumbs": [ + "Models", + "Hybrid models", + "RNN_FCN" + ] + }, + { + "objectID": "models.mlp.html", + "href": "models.mlp.html", + "title": "MLP", + "section": "", + "text": "This is an unofficial PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:\nFawaz, H. I., Forestier, G., Weber, J., Idoumghar, L., & Muller, P. A. (2019). Deep learning for time series classification: a review. Data Mining and Knowledge Discovery, 33(4), 917-963.\nOfficial MLP TensorFlow implementation: https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mlp.py\n\nsource\n\nMLP\n\n MLP (c_in, c_out, seq_len, layers=[500, 500, 500], ps=[0.1, 0.2, 0.2],\n act=ReLU(inplace=True), use_bn=False, bn_final=False,\n lin_first=False, fc_dropout=0.0, y_range=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 16\nnvars = 3\nseq_len = 128\nc_out = 2\nxb = torch.rand(bs, nvars, seq_len)\nmodel = MLP(nvars, c_out, seq_len)\ntest_eq(model(xb).shape, (bs, c_out))\nmodel\n\nMLP(\n (flatten): Reshape(bs)\n (mlp): ModuleList(\n (0): LinBnDrop(\n (0): Dropout(p=0.1, inplace=False)\n (1): Linear(in_features=384, out_features=500, bias=True)\n (2): ReLU(inplace=True)\n )\n (1): LinBnDrop(\n (0): Dropout(p=0.2, inplace=False)\n (1): Linear(in_features=500, out_features=500, bias=True)\n (2): ReLU(inplace=True)\n )\n (2): LinBnDrop(\n (0): Dropout(p=0.2, inplace=False)\n (1): Linear(in_features=500, out_features=500, bias=True)\n (2): ReLU(inplace=True)\n )\n )\n (head): Sequential(\n (0): LinBnDrop(\n (0): Linear(in_features=500, out_features=2, bias=True)\n )\n )\n)", + "crumbs": [ + "Models", + "MLPs", + "MLP" + ] + }, + { + "objectID": "models.tst.html", + "href": "models.tst.html", + "title": "TST", + "section": "", + "text": "This is an unofficial PyTorch implementation by Ignacio Oguiza of - oguiza@timeseriesAI.co based on: * George Zerveas et al. 
A Transformer-based Framework for Multivariate Time Series Representation Learning, in Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD ’21), August 14–18, 2021. ArXiV version: https://arxiv.org/abs/2010.02803 * Official implementation: https://github.com/gzerveas/mvts_transformer\nThis paper uses ‘Attention is all you need’ as a major reference: * Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., … & Polosukhin, I. (2017). Attention is all you need. In Advances in neural information processing systems (pp. 5998-6008).\nThis implementation is adapted to work with the rest of the tsai library, and contain some hyperparameters that are not available in the original implementation. They are included to experiment with them.", + "crumbs": [ + "Models", + "Transformers", + "TST" + ] + }, + { + "objectID": "models.tst.html#tst-arguments", + "href": "models.tst.html#tst-arguments", + "title": "TST", + "section": "TST arguments", + "text": "TST arguments\nUsual values are the ones that appear in the “Attention is all you need” and “A Transformer-based Framework for Multivariate Time Series Representation Learning” papers.\nThe default values are the ones selected as a default configuration in the latter.\n\nc_in: the number of features (aka variables, dimensions, channels) in the time series dataset. dls.var\nc_out: the number of target classes. dls.c\nseq_len: number of time steps in the time series. dls.len\nmax_seq_len: useful to control the temporal resolution in long time series to avoid memory issues. Default. None.\nd_model: total dimension of the model (number of features created by the model). Usual values: 128-1024. Default: 128.\nn_heads: parallel attention heads. Usual values: 8-16. Default: 16.\nd_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.\nd_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.\nd_ff: the dimension of the feedforward network model. Usual values: 256-4096. Default: 256.\ndropout: amount of residual dropout applied in the encoder. Usual values: 0.-0.3. Default: 0.1.\nactivation: the activation function of intermediate layer, relu or gelu. Default: ‘gelu’.\nn_layers: the number of sub-encoder-layers in the encoder. Usual values: 2-8. Default: 3.\nfc_dropout: dropout applied to the final fully connected layer. Usual values: 0.-0.8. Default: 0.\ny_range: range of possible y values (used in regression tasks). Default: None\nkwargs: nn.Conv1d kwargs. 
If not {}, a nn.Conv1d with those kwargs will be applied to original time series.", + "crumbs": [ + "Models", + "Transformers", + "TST" + ] + }, + { + "objectID": "models.tst.html#imports", + "href": "models.tst.html#imports", + "title": "TST", + "section": "Imports", + "text": "Imports", + "crumbs": [ + "Models", + "Transformers", + "TST" + ] + }, + { + "objectID": "models.tst.html#tst", + "href": "models.tst.html#tst", + "title": "TST", + "section": "TST", + "text": "TST\n\nt = torch.rand(16, 50, 128)\noutput, attn = _MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t)\noutput.shape, attn.shape\n\n(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))\n\n\n\nt = torch.rand(16, 50, 128)\noutput = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=3, d_k=None, d_v=None, d_ff=512, dropout=0.1, activation='gelu')(t)\noutput.shape\n\ntorch.Size([16, 50, 128])\n\n\n\nsource\n\nTST\n\n TST (c_in:int, c_out:int, seq_len:int, max_seq_len:Optional[int]=None,\n n_layers:int=3, d_model:int=128, n_heads:int=16,\n d_k:Optional[int]=None, d_v:Optional[int]=None, d_ff:int=256,\n dropout:float=0.1, act:str='gelu', fc_dropout:float=0.0,\n y_range:Optional[tuple]=None, verbose:bool=False, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 5000\n\nxb = torch.randn(bs, c_in, seq_len)\n\n# standardize by channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\n# Settings\nmax_seq_len = 256\nd_model = 128\nn_heads = 16\nd_k = d_v = None # if None --> d_model // n_heads\nd_ff = 256\ndropout = 0.1\nactivation = \"gelu\"\nn_layers = 3\nfc_dropout = 0.1\nkwargs = {}\n\nmodel = TST(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, activation=activation, n_layers=n_layers,\n fc_dropout=fc_dropout, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 517378\n\n\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 60\n\nxb = torch.randn(bs, c_in, seq_len)\n\n# standardize by channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\n# Settings\nmax_seq_len = 120\nd_model = 128\nn_heads = 16\nd_k = d_v = None # if None --> d_model // n_heads\nd_ff = 256\ndropout = 0.1\nact = \"gelu\"\nn_layers = 3\nfc_dropout = 0.1\nkwargs = {}\n# kwargs = dict(kernel_size=5, padding=2)\n\nmodel = TST(c_in, c_out, seq_len, max_seq_len=max_seq_len, d_model=d_model, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, dropout=dropout, act=act, n_layers=n_layers,\n fc_dropout=fc_dropout, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 420226", + "crumbs": [ + "Models", + "Transformers", + "TST" + ] + }, + { + "objectID": "data.validation.html", + "href": "data.validation.html", + "title": "Spliting data", + "section": "", + "text": "Functions required to perform cross-validation and transform unique time series sequence into multiple samples ready to be used by a time series model.\n\n\nsource\n\nRandomSplitter\n\n RandomSplitter (valid_pct=0.2, seed=None)\n\nCreate function that splits items between train/val with valid_pct randomly.\n\nsource\n\n\nbalance_idx\n\n balance_idx (o, shuffle=False, 
strategy='oversample', random_state=None,\n verbose=False)\n\n\nsource\n\n\nleakage_finder\n\n leakage_finder (*splits, verbose=True)\n\nYou can pass splits as a tuple, or train, valid, …\n\nsource\n\n\ncheck_splits_overlap\n\n check_splits_overlap (splits)\n\n\nsource\n\n\ncheck_overlap\n\n check_overlap (a, b, c=None)\n\nChecks if there’s overlap between array-like objects\n\na = np.arange(10)\nb = np.arange(10, 20)\ntest_eq(check_overlap(a, b), False)\na = np.arange(10)\nb = np.arange(9, 20)\ntest_eq(check_overlap(a, b), [9])\na = np.arange(10)\nb = np.arange(10, 20)\nc = np.arange(20, 30)\ntest_eq(check_overlap(a, b, c), False)\na = np.arange(10)\nb = np.arange(10, 20)\nc = np.arange(10, 30)\ntest_eq(check_overlap(a, b, c), ([], [], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))\n\n\ny = np.concatenate([[i] * np.random.randint(10, 100) for i in range(5)])\ntrain_split = np.random.choice(len(y), int(len(y) * .8), False)\nc, v = np.unique(y[train_split], return_counts=True)\nprint(f\"{'imbalanced:':25} {c} {v}\")\n\noversampled_train_split = train_split[balance_idx(y[train_split], strategy=\"oversample\")]\nosc, osv = np.unique(y[oversampled_train_split], return_counts=True)\nprint(f\"{'balanced (oversample):':25} {osc} {osv}\")\ntest_eq(osv, [max(v)] * len(v))\n\nundersampled_train_split = train_split[balance_idx(y[train_split], strategy=\"undersample\")]\nusc, usv = np.unique(y[undersampled_train_split], return_counts=True)\nprint(f\"{'balanced (undersample):':25} {usc} {usv}\")\ntest_eq(usv, [min(v)] * len(v))\n\nimbalanced: [0 1 2 3 4] [24 43 64 41 8]\nbalanced (oversample): [0 1 2 3 4] [64 64 64 64 64]\nbalanced (undersample): [0 1 2 3 4] [8 8 8 8 8]\n\n\n\nl = L(list(concat(np.zeros(5), np.ones(10)).astype(int)))\nbalanced_idx = balance_idx(l)\ntest_eq(np.mean(l[balanced_idx]), 0.5)\ntest_eq(isinstance(balanced_idx, L), True)\n\nl = list(concat(np.zeros(5), np.ones(10)).astype(int))\nbalanced_idx = balance_idx(l)\ntest_eq(np.mean(L(l)[balanced_idx]), 0.5)\ntest_eq(isinstance(balanced_idx, L), True)\n\na = concat(np.zeros(5), np.ones(10)).astype(int)\nbalanced_idx = balance_idx(a)\ntest_eq(np.mean(a[balanced_idx]), 0.5)\ntest_eq(isinstance(balanced_idx, L), True)\n\nt = concat(torch.zeros(5), torch.ones(10))\nbalanced_idx = balance_idx(t, shuffle=True)\ntest_eq(t[balanced_idx].mean(), 0.5)\ntest_eq(isinstance(balanced_idx, L), True)\n\n\na, b = np.arange(100_000), np.arange(100_000, 200_000)\n\n\nsoft_labels = True\nfilter_pseudolabels = .5\nbalanced_pseudolabels = True\n\npseudolabels = torch.rand(1000, 3)\npseudolabels = torch.softmax(pseudolabels, -1) if soft_labels else torch.argmax(pseudolabels, -1)\nhpl = torch.argmax(pseudolabels, -1) if soft_labels else pseudolabels\n\nif filter_pseudolabels and pseudolabels.ndim > 1: \n error = 1 - pseudolabels.max(-1).values\n filt_pl_idx = np.arange(len(error))[error < filter_pseudolabels]\n filt_pl = pseudolabels[error < filter_pseudolabels]\n assert len(filt_pl) > 0, 'no filtered pseudolabels'\n filt_hpl = torch.argmax(filt_pl, -1)\nelse: \n filt_pl_idx = np.arange(len(pseudolabels))\n filt_pl = filt_hpl = pseudolabels\n\n\npl_split = filt_pl_idx[balance_idx(filt_hpl)] if balanced_pseudolabels else filt_pl_idx\ntest_eq(hpl[pl_split].float().mean(), np.mean(np.unique(hpl)))\n\n\nsource\n\n\nTrainValidTestSplitter\n\n TrainValidTestSplitter (n_splits:int=1, valid_size:Union[float,int]=0.2,\n test_size:Union[float,int]=0.0,\n train_only:bool=False, stratify:bool=True,\n balance:bool=False, strategy:str='oversample',\n shuffle:bool=True,\n 
random_state:Optional[int]=None,\n verbose:bool=False, **kwargs)\n\nSplit items into random train, valid (and test optional) subsets.\n\nsource\n\n\nplot_splits\n\n plot_splits (splits)\n\n\nsource\n\n\nget_splits\n\n get_splits (o, n_splits:int=1, valid_size:float=0.2, test_size:float=0.0,\n train_only:bool=False,\n train_size:Union[NoneType,float,int]=None,\n balance:bool=False, strategy:str='oversample',\n shuffle:bool=True, stratify:bool=True,\n check_splits:bool=True, random_state:Optional[int]=None,\n show_plot:bool=True, verbose:bool=False)\n\nArguments: o : object to which splits will be applied, usually target. n_splits : number of folds. Must be an int >= 1. valid_size : size of validation set. Only used if n_splits = 1. If n_splits > 1 valid_size = (1. - test_size) / n_splits. test_size : size of test set. Default = 0. train_only : if True valid set == train set. This may be useful for debugging purposes. train_size : size of the train set used. Default = None (the remainder after assigning both valid and test). Useful for to get learning curves with different train sizes or get a small batch to debug a neural net. balance : whether to balance data so that train always contain the same number of items per class. strategy : strategy to balance data (“undersample” or “oversample”). Default = “oversample”. shuffle : whether to shuffle data before splitting into batches. Note that the samples within each split will be shuffle. stratify : whether to create folds preserving the percentage of samples for each class. check_splits : whether to perform leakage and completion checks. random_state : when shuffle is True, random_state affects the ordering of the indices. Pass an int for reproducible output. show_plot : plot the split distribution\n\nn_splits = 5\nvalid_size = 0.2\ntest_size = 0.2\ntrain_only = False # set to True for debugging (valid = train)\ntrain_size = 5000\nstratify = True\nbalance = False\nshuffle = True\npredefined_splits = None\nshow_plot = True \n\n\ncheck_splits = True\nrandom_state = 23\n\ny = np.random.randint(0, 3, 10000) + 100\n\nsplits = get_splits(y, n_splits=n_splits, valid_size=valid_size, test_size=test_size, shuffle=shuffle, balance=balance, stratify=stratify,\n train_only=train_only, train_size=train_size, check_splits=check_splits, random_state=random_state, show_plot=show_plot, verbose=True)\nsplits\n\n\n\n\n\n\n\n\n(((#5000) [3490,2428,4475,8317,2802,6834,2954,7671,3383,9554...],\n (#1600) [1680,6677,5879,4428,5511,8312,372,5127,7012,3021...],\n (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]),\n ((#5000) [3442,4237,470,3901,3808,3793,6286,8546,6254,9530...],\n (#1600) [9160,5451,3628,143,2054,7225,7124,8057,1405,5089...],\n (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]),\n ((#5000) [9850,7451,7338,9742,3258,1527,4450,5678,2932,1693...],\n (#1600) [6186,5970,376,7848,3786,1663,7193,3647,3277,553...],\n (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]),\n ((#5000) [1853,7308,7375,3851,1852,3820,2601,3868,8718,7190...],\n (#1600) [4182,6419,6265,4837,168,9627,2500,9951,1610,7547...],\n (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]),\n ((#5000) [7878,6392,453,4817,4676,5738,6482,4033,8114,7337...],\n (#1600) [7682,6416,2877,9164,1583,342,2916,4806,8776,2046...],\n (#2000) [1263,6498,1602,1838,1073,5304,1210,1037,8789,6175...]))\n\n\n\ntrain_size=256\ny = np.random.randint(0, 3, 1000) + 100\nsplits = get_splits(y, train_size=train_size, train_only=True)\ntest_eq(splits[0], 
splits[1])\ntest_eq(len(splits[0]), train_size)\nsplits\n\nvalid == train\n\n\n\n\n\n\n\n\n\n((#256) [550,813,388,595,948,198,354,749,175,812...],\n (#256) [550,813,388,595,948,198,354,749,175,812...])\n\n\n\nsource\n\n\nget_walk_forward_splits\n\n get_walk_forward_splits (o, n_splits=1, train_size=None, valid_size=0.2,\n test_size=0.0, anchored=False, gap=0.0,\n test_after_valid=True, random_state=None,\n show_plot=True)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\no\n\n\n3D object with shape [samples x features x steps] containing the time series we need to split\n\n\nn_splits\nint\n1\n# of splits\n\n\ntrain_size\nNoneType\nNone\noptional: training set size as an int or a float. None when using and anchored strategy.\n\n\nvalid_size\nfloat\n0.2\nvalidation set size as an int or a float\n\n\ntest_size\nfloat\n0.0\ntest set size as an int or a float\n\n\nanchored\nbool\nFalse\nstarting point for train set remains the same for all splits\n\n\ngap\nfloat\n0.0\n# of samples to exclude from the end of each train set before the validation set. Entered as an int or a float\n\n\ntest_after_valid\nbool\nTrue\nflag to indicate if validation and test will be samples randomly or sequentially\n\n\nrandom_state\nNoneType\nNone\ninteger that can be used to generate reproducible results\n\n\nshow_plot\nbool\nTrue\nplots the splits created\n\n\n\n\no = np.random.rand(10_000, 3, 50) # shape: [samples x features x steps]\n\nsplits = get_walk_forward_splits(\n o, \n n_splits=4, \n train_size=.6,\n valid_size=0.1, \n test_size=0.1, \n anchored = True,\n gap = 100,\n test_after_valid = True,\n random_state = None,\n show_plot=True,\n)\n\nsplits = get_walk_forward_splits(\n o, \n n_splits=3, \n train_size=0.3,\n valid_size=0.1, \n test_size=0.1, \n anchored = False,\n gap = 0.,\n test_after_valid = False,\n random_state = None,\n show_plot=True,\n)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nTSSplitter\n\n TSSplitter (valid_size=0.2, test_size=0.0, fcst_horizon=0,\n show_plot=True)\n\nCreate function that splits items between train/val with valid_size without shuffling data.\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nvalid_size\nfloat\n0.2\nint or float indicating the validation set size\n\n\ntest_size\nfloat\n0.0\nint or float indicating the test set size\n\n\nfcst_horizon\nint\n0\nint that indicates the number of time steps removed at the end of train (and validation)\n\n\nshow_plot\nbool\nTrue\nflag that indicates if a plot showing the splits will be created\n\n\n\n\ny = np.arange(1000) + 100\ntest_eq(TimeSplitter(valid_size=0.2)(y)[1], L(np.arange(800, 1000).tolist()))\ntest_eq(TimeSplitter(valid_size=0.2)(y)[0], TimeSplitter(valid_size=200)(y)[0])\nTimeSplitter(valid_size=0.2, show_plot=True)(y)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n((#800) [0,1,2,3,4,5,6,7,8,9...],\n (#200) [800,801,802,803,804,805,806,807,808,809...])\n\n\n\nn_splits = 5\nvalid_size = 0.2 \ntest_size = 0\ntrain_only = False # set to True for debugging (valid = train)\ntrain_size = None\nstratify = True\nbalance = True\nshuffle = True\npredefined_splits = None\nshow_plot = True \n\n\ncheck_splits = True\nrandom_state = 23\n\nsplits = get_splits(y, n_splits=n_splits, valid_size=valid_size, test_size=test_size, shuffle=shuffle, balance=balance, stratify=stratify,\n train_only=train_only, train_size=train_size, check_splits=check_splits, random_state=random_state, show_plot=show_plot, verbose=True)\nsplit = splits[0] if n_splits == 1 else splits[0][0]\ny[split].mean(), split\n\nstratify set to False 
as n_splits=5 cannot be greater than the min number of members in each class (1).\n\n\n\n\n\n\n\n\n\n\nlist([splits[0], splits[1], splits[2], splits[3], splits[4]])\n\n[((#800) [314,194,782,789,502,917,137,415,904,181...],\n (#200) [362,151,934,378,95,597,500,117,980,844...]),\n ((#800) [312,198,777,788,515,910,145,413,898,186...],\n (#200) [352,133,955,396,64,596,442,79,991,882...]),\n ((#800) [311,197,783,791,507,922,145,416,908,184...],\n (#200) [338,125,912,361,54,594,486,88,994,859...]),\n ((#800) [296,181,782,789,493,917,130,401,905,165...],\n (#200) [405,199,953,444,113,610,515,137,997,881...]),\n ((#800) [320,190,782,788,506,906,141,412,893,178...],\n (#200) [336,149,942,358,49,582,472,70,990,907...])]\n\n\n\nn_splits = 5\nvalid_size = 0.\ntest_size = 0.\nshuffle = True\nstratify = True\ntrain_only = True\ntrain_size = None\ncheck_splits = True\nrandom_state = 1\nshow_plot = True \n\nsplits = get_splits(y, n_splits=n_splits, valid_size=valid_size, test_size=test_size, shuffle=shuffle, stratify=stratify,\n train_only=train_only, train_size=train_size, check_splits=check_splits, random_state=random_state, show_plot=show_plot, verbose=True)\nfor split in splits: \n test_eq(len(split[0]), len(y))\n test_eq(np.sort(split[0]), np.arange(len(y)))\n\nstratify set to False as n_splits=5 cannot be greater than the min number of members in each class (1).\nvalid == train\n\n\n\n\n\n\n\n\n\n\nn_splits = 5\ny = np.random.randint(0, 2, 1000)\n\nsplits = get_splits(y, n_splits=n_splits, shuffle=False, check_splits=True)\ntest_eq(np.concatenate((L(zip(*splits))[1])), np.arange(len(y)))\n\nsplits = get_splits(y, n_splits=n_splits, shuffle=True, check_splits=True)\ntest_eq(np.sort(np.concatenate((L(zip(*splits))[1]))), np.arange(len(y)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nn_splits = 2\ny = np.random.randint(0, 2, 1000)\n\nsplits = get_splits(y, n_splits=n_splits, test_size=0.2, shuffle=False)\nfor i in range(n_splits): leakage_finder(*splits[i])\ntest_eq(len(splits), n_splits)\ntest_eq(len(splits[0]), 3)\ns = []\n[s.extend(split) for split in splits[0]]\ntest_eq(np.sort(s), np.arange(len(y)))\ns = []\n[s.extend(split) for split in splits[1]]\ntest_eq(np.sort(s), np.arange(len(y)))\n\n\n\n\n\n\n\n\n\ny = np.random.randint(0, 2, 1000)\nsplits1 = get_splits(y, valid_size=.25, test_size=0, random_state=23, stratify=True, shuffle=True)\nsplits2 = get_splits(y, valid_size=.25, test_size=0, random_state=23, stratify=True, shuffle=True)\nsplits3 = get_splits(y, valid_size=.25, test_size=0, random_state=None, stratify=True, shuffle=True)\nsplits4 = get_splits(y, valid_size=.25, test_size=0, random_state=None, stratify=True, shuffle=True)\ntest_eq(splits1[0], splits2[0])\ntest_ne(splits3[0], splits4[0])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ny = np.random.randint(0, 2, 100)\nsplits = get_splits(y, valid_size=.25, test_size=0, random_state=23, stratify=True, shuffle=True)\ntest_eq(len(splits), 2)\n\n\n\n\n\n\n\n\n\ny = np.random.randint(0, 2, 100)\nsplits = get_splits(y, valid_size=.25, test_size=0, random_state=23, stratify=True)\ntest_eq(len(splits), 2)\n\n\n\n\n\n\n\n\n\ny = np.random.randint(0, 2, 100)\nsplits = get_splits(y, valid_size=.25, test_size=20, random_state=23, stratify=True)\ntest_eq(len(splits), 3)\nleakage_finder(*splits)\n\n\n\n\n\n\n\n\n\nsplits = TrainValidTestSplitter(valid_size=.25, test_size=20, random_state=23, stratify=True)(np.random.randint(0, 2, 100))\ntest_eq(len(splits[1]), 25)\ntest_eq(len(splits[2]), 20)\n\n\no = np.random.randint(0, 2, 1000)\nfor p in [1, 
.75, .5, .25, .125]:\n splits = get_splits(o, train_size=p)\n test_eq(len(splits[0]), len(o) * .8 * p)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ny = L([0] * 50 + [1] * 25 + [2] * 15 + [3] * 10)\nsplits = get_splits(y, valid_size=.2, test_size=.2)\ntest_eq(np.mean(y[splits[0]])==np.mean(y[splits[1]])==np.mean(y[splits[2]]), True)\nsplits\n\n\n\n\n\n\n\n\n((#60) [58,95,53,44,28,69,9,12,22,88...],\n (#20) [89,71,60,4,19,37,75,13,46,30...],\n (#20) [76,68,74,29,16,97,14,21,90,82...])\n\n\n\ny = L([0] * 50 + [1] * 25 + [2] * 15 + [3] * 10)\nsplits = get_splits(y, n_splits=1, valid_size=.2, test_size=.2, shuffle=False)\n# test_eq(splits[0] + splits[1] + splits[2], np.arange(100))\nsplits\n\n\n\n\n\n\n\n\n((#60) [0,1,2,3,4,5,6,7,8,9...],\n (#20) [60,61,62,63,64,65,66,67,68,69...],\n (#20) [80,81,82,83,84,85,86,87,88,89...])\n\n\n\nsplits = get_splits(np.random.randint(0,5,100), valid_size=0.213, test_size=17)\ntest_eq(len(splits[1]), 21)\ntest_eq(len(splits[2]), 17)\n\n\n\n\n\n\n\n\n\nsplits = get_splits(np.random.randint(0,5,100), valid_size=0.213, test_size=17, train_size=.2)\nsplits\n\n\n\n\n\n\n\n\n((#12) [37,38,62,60,16,22,95,44,94,98...],\n (#21) [88,93,5,31,57,23,90,18,15,40...],\n (#17) [4,86,47,33,59,52,99,48,70,3...])\n\n\n\nsource\n\n\ncombine_split_data\n\n combine_split_data (xs, ys=None)\n\nxs is a list with X_train, X_valid, …. ys is None or a list with y_train, y_valid, ….\n\nsource\n\n\nget_predefined_splits\n\n get_predefined_splits (*xs)\n\nxs is a list with X_train, X_valid, …\n\nsource\n\n\nget_splits_len\n\n get_splits_len (splits)\n\n\nX_train, y_train, X_valid, y_valid = np.random.rand(3,3,4), np.random.randint(0,2,3), np.random.rand(2,3,4), np.random.randint(0,2,2)\nX, y, splits = combine_split_data([X_train, X_valid], [y_train, y_valid])\ntest_eq(X_train, X[splits[0]])\ntest_eq(X_valid, X[splits[1]])\ntest_type(X_train, X)\ntest_type(y_train, y)\n\n\nX_train, y_train, X_valid, y_valid = np.random.rand(3,4), np.random.randint(0,2,3), np.random.rand(2,4), np.random.randint(0,2,2)\nX, y, splits = combine_split_data([X_train, X_valid], [y_train, y_valid])\ntest_eq(X_train[:, None], X[splits[0]])\ntest_eq(X_valid[:, None], X[splits[1]])\ntest_type(X_train, X)\ntest_type(y_train, y)\n\n\n\nForecasting\n\nsource\n\nget_df_usable_idxs\n\n get_df_usable_idxs (df, fcst_history, fcst_horizon, stride=1,\n unique_id_cols=None, return_np_indices=False)\n\nCalculates the indices that can be used from a df when using a sliding window\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe containing a sorted time series\n\n\nfcst_history\n\n\n# historical steps used as input (size of the sliding window for the input)\n\n\nfcst_horizon\n\n\n# steps forecasted into the future (size of the sliding window for the target)\n\n\nstride\nint\n1\nint or tuple of 2 int containing the strides of the sliding windows (input and target)\n\n\nunique_id_cols\nNoneType\nNone\nstr indicating the column/s with the unique identifier/s for each entity\n\n\nreturn_np_indices\nbool\nFalse\nbool indicating what type of indices are returned. 
Default to False (dataframe indices)\n\n\n\n\nsource\n\n\nget_usable_idxs\n\n get_usable_idxs (df, fcst_history, fcst_horizon, stride=1)\n\n\nsource\n\n\ncalculate_fcst_stats\n\n calculate_fcst_stats (df, fcst_history, fcst_horizon, splits,\n x_vars=None, y_vars=None, subset_size=None)\n\nCalculates the training stats required in a forecasting task\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe containing a sorted time series for a single entity or subject\n\n\nfcst_history\n\n\n# historical steps used as input.\n\n\nfcst_horizon\n\n\n# steps forecasted into the future.\n\n\nsplits\n\n\nsplits that will be used to train the model. splits[0] is the train split:\n\n\nx_vars\nNoneType\nNone\nfeatures used as input\n\n\ny_vars\nNoneType\nNone\nfeatures used as output\n\n\nsubset_size\nNoneType\nNone\nint or float to determne the number of train samples used to calculate the mean and std\n\n\n\n\nsource\n\n\nget_forecasting_splits\n\n get_forecasting_splits (df, fcst_history, fcst_horizon, stride=1,\n valid_size=0.0, test_size=0.2,\n valid_cutoff_datetime=None,\n test_cutoff_datetime=None, datetime_col=None,\n use_index=False, unique_id_cols=None,\n show_plot=True)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe containing a sorted time series\n\n\nfcst_history\n\n\n# historical steps used as input (size of the sliding window for the input)\n\n\nfcst_horizon\n\n\n# steps forecasted into the future (size of the sliding window for the target)\n\n\nstride\nint\n1\nint or tuple of 2 int containing the strides of the sliding windows (input and target)\n\n\nvalid_size\nfloat\n0.0\nint or float indicating the size of the training set (based on datetimes)\n\n\ntest_size\nfloat\n0.2\nint or float indicating the size of the test set (based on datetimes)\n\n\nvalid_cutoff_datetime\nNoneType\nNone\nfirst prediction datetime of validation dataset\n\n\ntest_cutoff_datetime\nNoneType\nNone\nfirst prediction datetime of test dataset\n\n\ndatetime_col\nNoneType\nNone\nstr indicating the column with the datetime values\n\n\nuse_index\nbool\nFalse\nflag to indicate if the datetime is in the index\n\n\nunique_id_cols\nNoneType\nNone\nstr indicating the column/s with the unique identifier/s for each entity\n\n\nshow_plot\nbool\nTrue\nflag to indicate if splits should be plotted\n\n\n\n\ndf1_len = 100\ndf2_len = 80\n\ndatetime_col = 'datetime' \ndf1 = pd.DataFrame(np.arange(df1_len), columns=['value'])\ndf1['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df1_len, freq='1D')\ndf1['type'] = 1\n\ndf = df1\ndisplay(df)\n\n# settings\nfcst_history = 10\nfcst_horizon = 1\nstride = 1\nunique_id_cols = 'type'\ndatetime_col = 'datetime' \nuse_index = False\nvalid_size = 0.1 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\ntest_size = 0.2 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\nvalid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset\ntest_cutoff_datetime = '1749-12-24' # first prediction datetime of test dataset\nvalid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\ntest_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\n\n\nsplits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, \n unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,\n 
valid_size=valid_size, test_size=test_size, \n valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)\n\nprint(f\"splits size : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})\")\n\n# settings\nfcst_history = 10\nfcst_horizon = 5\nstride = 5\nunique_id_cols = 'type'\ndatetime_col = 'datetime' \nuse_index = False\nvalid_size = 0.1 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\ntest_size = 0.2 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\nvalid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset\ntest_cutoff_datetime = '1749-12-24' # first prediction datetime of test dataset\nvalid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\ntest_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\n\n\nsplits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, \n unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,\n valid_size=valid_size, test_size=test_size, \n valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)\n\nprint(f\"splits size : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})\")\n\n\n\n\n\n\n\n\nvalue\ndatetime\ntype\n\n\n\n\n0\n0\n1749-03-31\n1\n\n\n1\n1\n1749-04-01\n1\n\n\n2\n2\n1749-04-02\n1\n\n\n3\n3\n1749-04-03\n1\n\n\n4\n4\n1749-04-04\n1\n\n\n...\n...\n...\n...\n\n\n95\n95\n1749-07-04\n1\n\n\n96\n96\n1749-07-05\n1\n\n\n97\n97\n1749-07-06\n1\n\n\n98\n98\n1749-07-07\n1\n\n\n99\n99\n1749-07-08\n1\n\n\n\n\n100 rows × 3 columns\n\n\n\n\n\n\n\n\n\n\nsplits size : [63, 9, 18] (90: [0.7, 0.1, 0.2])\nsplits size : [12, 2, 4] (18: [0.67, 0.11, 0.22])\n\n\n\n\n\n\n\n\n\n\ndf1_len = 100\ndf2_len = 80\n\ndatetime_col = 'datetime' \ndf1 = pd.DataFrame(np.arange(df1_len), columns=['value'])\ndf1['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df1_len, freq='1D')\ndf1['type'] = 1\ndf1_index = df1.set_index(\"datetime\")\n\ndf = df1_index\ndisplay(df)\n\n# settings\nfcst_history = 10\nfcst_horizon = 1\nstride = 1\nunique_id_cols = 'type'\ndatetime_col = 'datetime' \nuse_index = True\nvalid_size = 0.1 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\ntest_size = 0.2 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\nvalid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset\ntest_cutoff_datetime = '1749-12-24' # first prediction datetime of test dataset\nvalid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\ntest_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\n\n\nsplits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, \n unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,\n valid_size=valid_size, test_size=test_size, \n valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)\n\nprint(f\"splits size : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in 
splits]})\")\n\n# settings\nfcst_history = 10\nfcst_horizon = 5\nstride = 5\nunique_id_cols = 'type'\ndatetime_col = 'datetime' \nuse_index = True\nvalid_size = 0.1 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\ntest_size = 0.2 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\nvalid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset\ntest_cutoff_datetime = '1749-12-24' # first prediction datetime of test dataset\nvalid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\ntest_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\n\n\nsplits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, \n unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,\n valid_size=valid_size, test_size=test_size, \n valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)\n\nprint(f\"splits size : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})\")\n\n\n\n\n\n\n\n\nvalue\ntype\n\n\ndatetime\n\n\n\n\n\n\n1749-03-31\n0\n1\n\n\n1749-04-01\n1\n1\n\n\n1749-04-02\n2\n1\n\n\n1749-04-03\n3\n1\n\n\n1749-04-04\n4\n1\n\n\n...\n...\n...\n\n\n1749-07-04\n95\n1\n\n\n1749-07-05\n96\n1\n\n\n1749-07-06\n97\n1\n\n\n1749-07-07\n98\n1\n\n\n1749-07-08\n99\n1\n\n\n\n\n100 rows × 2 columns\n\n\n\n\n\n\n\n\n\n\nsplits size : [63, 9, 18] (90: [0.7, 0.1, 0.2])\nsplits size : [12, 2, 4] (18: [0.67, 0.11, 0.22])\n\n\n\n\n\n\n\n\n\n\ndf1_len = 100\ndf2_len = 80\n\ndatetime_col = 'datetime' \ndf1 = pd.DataFrame(np.arange(df1_len), columns=['value'])\ndf1['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df1_len, freq='1D')\ndf1['type'] = 1\ndf1_index = df1.set_index(\"datetime\")\ndf2 = pd.DataFrame(np.arange(df2_len) * 10, columns=['value'])\ndf2['datetime'] = pd.date_range(pd.to_datetime('1749-04-15'), periods=df2_len, freq='1D')\ndf2['type'] = 2\ndf_comb = pd.concat([df1, df2]).reset_index(drop=True).reset_index(drop=True)\n\n\ndf = df_comb\ndisplay(df)\n\n# settings\nfcst_history = 10\nfcst_horizon = 3\nstride = 1\nunique_id_cols = 'type'\ndatetime_col = 'datetime' \nuse_index = False\nvalid_size = 0.1 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\ntest_size = 0.2 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\nvalid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset\ntest_cutoff_datetime = '1749-12-24' # first prediction datetime of test dataset\nvalid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\ntest_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\n\n\nsplits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, \n unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,\n valid_size=valid_size, test_size=test_size, \n valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)\n\nprint(f\"splits size : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in 
splits]})\")\n\n\n\n\n\n\n\n\nvalue\ndatetime\ntype\n\n\n\n\n0\n0\n1749-03-31\n1\n\n\n1\n1\n1749-04-01\n1\n\n\n2\n2\n1749-04-02\n1\n\n\n3\n3\n1749-04-03\n1\n\n\n4\n4\n1749-04-04\n1\n\n\n...\n...\n...\n...\n\n\n175\n750\n1749-06-29\n2\n\n\n176\n760\n1749-06-30\n2\n\n\n177\n770\n1749-07-01\n2\n\n\n178\n780\n1749-07-02\n2\n\n\n179\n790\n1749-07-03\n2\n\n\n\n\n180 rows × 3 columns\n\n\n\n\n\n\n\n\n\n\nsplits size : [101, 16, 31] (148: [0.68, 0.11, 0.21])\n\n\n\ndf1_len = 100\ndf2_len = 80\n\ndatetime_col = 'datetime' \ndf1 = pd.DataFrame(np.arange(df1_len), columns=['value'])\ndf1['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df1_len, freq='1D')\ndf1['type'] = 1\ndf1_index = df1.set_index(\"datetime\")\ndf2 = pd.DataFrame(np.arange(df2_len) * 10, columns=['value'])\ndf2['datetime'] = pd.date_range(pd.to_datetime('1749-04-15'), periods=df2_len, freq='1D')\ndf2['type'] = 2\ndf_comb = pd.concat([df1, df2]).reset_index(drop=True).reset_index(drop=True)\ndf_comb_index = df_comb.set_index(\"datetime\")\ndf_comb_index.index.name = None\n\n\ndf = df_comb_index\ndisplay(df)\n\n# settings\nfcst_history = 15\nfcst_horizon = 5\nstride = 1\nunique_id_cols = 'type'\ndatetime_col = 'datetime' \nuse_index = True\nvalid_size = 0.1 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\ntest_size = 0.2 # a percent (float) or a number of samples (int) - .1 means 10% of the dates\nvalid_cutoff_datetime = '1749-08-21' # first prediction datetime of validation dataset\ntest_cutoff_datetime = '1749-12-24' # first prediction datetime of test dataset\nvalid_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\ntest_cutoff_datetime = None # datetime compatible with the datetime_col containing the starting date for the validation dataset\n\n\nsplits = get_forecasting_splits(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, stride=stride, \n unique_id_cols=unique_id_cols, datetime_col=datetime_col, use_index=use_index,\n valid_size=valid_size, test_size=test_size, \n valid_cutoff_datetime=valid_cutoff_datetime, test_cutoff_datetime=test_cutoff_datetime)\n\nprint(f\"splits size : {[len(s) for s in splits]} ({sum([len(s) for s in splits])}: {[round(len(s)/sum([len(s) for s in splits]), 2) for s in splits]})\")\n\n\n\n\n\n\n\n\nvalue\ntype\n\n\n\n\n1749-03-31\n0\n1\n\n\n1749-04-01\n1\n1\n\n\n1749-04-02\n2\n1\n\n\n1749-04-03\n3\n1\n\n\n1749-04-04\n4\n1\n\n\n...\n...\n...\n\n\n1749-06-29\n750\n2\n\n\n1749-06-30\n760\n2\n\n\n1749-07-01\n770\n2\n\n\n1749-07-02\n780\n2\n\n\n1749-07-03\n790\n2\n\n\n\n\n180 rows × 2 columns\n\n\n\n\n\n\n\n\n\n\nsplits size : [83, 14, 29] (126: [0.66, 0.11, 0.23])\n\n\n\nsource\n\n\nget_long_term_forecasting_splits\n\n get_long_term_forecasting_splits (df, fcst_history, fcst_horizon,\n dsid=None, show_plot=True)\n\nReturns the train, valid and test splits for long-range time series datasets\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe containing a sorted time series for a single entity or subject\n\n\nfcst_history\n\n\n# historical steps used as input.\n\n\nfcst_horizon\n\n\n# steps forecasted into the future.\n\n\ndsid\nNoneType\nNone\ndataset name\n\n\nshow_plot\nbool\nTrue\nplot the splits", + "crumbs": [ + "Data", + "Spliting data" + ] + }, + { + "objectID": "data.features.html", + "href": "data.features.html", + "title": "Featurizing Time Series", + "section": "", + "text": "Functions used to transform time series into a dataframe that can be used to 
create tabular dataloaders.\n\nIn this case we are using tsfresh that is one of the most widely known libraries used to create features from time series. You can get more details about this library here: https://tsfresh.readthedocs.io/en/latest/\n\nsource\n\nget_ts_features\n\n get_ts_features (X:Union[numpy.ndarray,torch.Tensor],\n y:Union[NoneType,numpy.ndarray,torch.Tensor]=None,\n features:Union[str,dict]='min',\n n_jobs:Optional[int]=None, **kwargs)\n\nArgs: X: np.array or torch.Tesnor of shape [samples, dimensions, timesteps]. y: Not required for unlabeled data. Otherwise, you need to pass it. features: ‘min’, ‘efficient’, ‘all’, or a dictionary. Be aware that ‘efficient’ and ‘all’ may required substantial memory and time.\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, return_split=False)\nX.shape\n\n(360, 24, 51)\n\n\nThere are 3 levels of fatures you can extract: ‘min’, ‘efficient’ and ‘all’. I’d encourage you to start with min as feature creation may take a long time.\nIn addition to this, you can pass a dictionary to build the desired features (see tsfresh documentation in the link above).\n\nts_features_df = get_ts_features(X, y)\nts_features_df.shape\n\nFeature Extraction: 100%|██████████| 40/40 [00:09<00:00, 4.00it/s]\n\n\n(360, 241)\n\n\nThe ‘min’ set creates a dataframe with 8 features per channel + 1 per target (total 193) for each time series sample (360).\n\ncont_names = ts_features_df.columns[:-1]\ny_names = 'target'\ndls = get_tabular_dls(ts_features_df, splits=splits, cont_names=cont_names, y_names=y_names)\ndls.show_batch()\n\n\n\n\n\n0__sum_values\n0__median\n0__mean\n0__length\n0__standard_deviation\n0__variance\n0__root_mean_square\n0__maximum\n0__absolute_maximum\n0__minimum\n1__sum_values\n1__median\n1__mean\n1__length\n1__standard_deviation\n1__variance\n1__root_mean_square\n1__maximum\n1__absolute_maximum\n1__minimum\n2__sum_values\n2__median\n2__mean\n2__length\n2__standard_deviation\n2__variance\n2__root_mean_square\n2__maximum\n2__absolute_maximum\n2__minimum\n3__sum_values\n3__median\n3__mean\n3__length\n3__standard_deviation\n3__variance\n3__root_mean_square\n3__maximum\n3__absolute_maximum\n3__minimum\n4__sum_values\n4__median\n4__mean\n4__length\n4__standard_deviation\n4__variance\n4__root_mean_square\n4__maximum\n4__absolute_maximum\n4__minimum\n5__sum_values\n5__median\n5__mean\n5__length\n5__standard_deviation\n5__variance\n5__root_mean_square\n5__maximum\n5__absolute_maximum\n5__minimum\n6__sum_values\n6__median\n6__mean\n6__length\n6__standard_deviation\n6__variance\n6__root_mean_square\n6__maximum\n6__absolute_maximum\n6__minimum\n7__sum_values\n7__median\n7__mean\n7__length\n7__standard_deviation\n7__variance\n7__root_mean_square\n7__maximum\n7__absolute_maximum\n7__minimum\n8__sum_values\n8__median\n8__mean\n8__length\n8__standard_deviation\n8__variance\n8__root_mean_square\n8__maximum\n8__absolute_maximum\n8__minimum\n9__sum_values\n9__median\n9__mean\n9__length\n9__standard_deviation\n9__variance\n9__root_mean_square\n9__maximum\n9__absolute_maximum\n9__minimum\n10__sum_values\n10__median\n10__mean\n10__length\n10__standard_deviation\n10__variance\n10__root_mean_square\n10__maximum\n10__absolute_maximum\n10__minimum\n11__sum_values\n11__median\n11__mean\n11__length\n11__standard_deviation\n11__variance\n11__root_mean_square\n11__maximum\n11__absolute_maximum\n11__minimum\n12__sum_values\n12__median\n12__mean\n12__length\n12__standard_deviation\n12__variance\n12__root_mean_square\n12__maximum\n12__absolute_maximum\n12__minimum\n13__sum_val
ues\n13__median\n13__mean\n13__length\n13__standard_deviation\n13__variance\n13__root_mean_square\n13__maximum\n13__absolute_maximum\n13__minimum\n14__sum_values\n14__median\n14__mean\n14__length\n14__standard_deviation\n14__variance\n14__root_mean_square\n14__maximum\n14__absolute_maximum\n14__minimum\n15__sum_values\n15__median\n15__mean\n15__length\n15__standard_deviation\n15__variance\n15__root_mean_square\n15__maximum\n15__absolute_maximum\n15__minimum\n16__sum_values\n16__median\n16__mean\n16__length\n16__standard_deviation\n16__variance\n16__root_mean_square\n16__maximum\n16__absolute_maximum\n16__minimum\n17__sum_values\n17__median\n17__mean\n17__length\n17__standard_deviation\n17__variance\n17__root_mean_square\n17__maximum\n17__absolute_maximum\n17__minimum\n18__sum_values\n18__median\n18__mean\n18__length\n18__standard_deviation\n18__variance\n18__root_mean_square\n18__maximum\n18__absolute_maximum\n18__minimum\n19__sum_values\n19__median\n19__mean\n19__length\n19__standard_deviation\n19__variance\n19__root_mean_square\n19__maximum\n19__absolute_maximum\n19__minimum\n20__sum_values\n20__median\n20__mean\n20__length\n20__standard_deviation\n20__variance\n20__root_mean_square\n20__maximum\n20__absolute_maximum\n20__minimum\n21__sum_values\n21__median\n21__mean\n21__length\n21__standard_deviation\n21__variance\n21__root_mean_square\n21__maximum\n21__absolute_maximum\n21__minimum\n22__sum_values\n22__median\n22__mean\n22__length\n22__standard_deviation\n22__variance\n22__root_mean_square\n22__maximum\n22__absolute_maximum\n22__minimum\n23__sum_values\n23__median\n23__mean\n23__length\n23__standard_deviation\n23__variance\n23__root_mean_square\n23__maximum\n23__absolute_maximum\n23__minimum\ntarget\n\n\n\n\n0\n-29.398621\n-0.578626\n-0.576444\n51.0\n0.019595\n0.000384\n0.576777\n-0.536114\n0.606751\n-0.606751\n-88.761322\n-1.745473\n-1.740418\n51.0\n0.033961\n0.001153\n1.740749\n-1.659181\n1.779713\n-1.779713\n-35.539612\n-0.696636\n-0.696855\n51.0\n0.005860\n0.000034\n0.696880\n-0.685844\n0.710908\n-0.710908\n63.889797\n0.837208\n1.252741\n51.0\n0.676822\n0.458088\n1.423885\n2.169677\n2.169677\n0.536361\n-54.103661\n-1.758527\n-1.060856\n51.0\n0.952467\n0.907193\n1.425696\n0.619366\n1.832052\n-1.832052\n-25.470938\n-0.562629\n-0.499430\n51.0\n0.176602\n0.031188\n0.529735\n-0.166668\n0.659942\n-0.659942\n-33.909939\n-0.666688\n-0.664901\n51.0\n0.013168\n0.000173\n0.665031\n-0.643999\n0.682598\n-0.682598\n-39.194389\n-0.766594\n-0.768517\n51.0\n0.007046\n0.000050\n0.768550\n-0.750228\n0.782717\n-0.782717\n-9.894929\n-0.191213\n-0.194018\n51.0\n0.008862\n0.000079\n0.194220\n-0.182825\n0.210663\n-0.210663\n41.573109\n0.658693\n0.815159\n51.0\n0.227995\n0.051982\n0.846443\n1.144142\n1.144142\n0.601647\n-26.870213\n-0.801317\n-0.526867\n51.0\n0.410643\n0.168628\n0.667994\n0.183081\n0.862615\n-0.862615\n-4.248524\n-0.096245\n-0.083304\n51.0\n0.033176\n0.001101\n0.089668\n-0.021126\n0.121141\n-0.121141\n-33.603584\n-0.660318\n-0.658894\n51.0\n0.016815\n0.000283\n0.659108\n-0.626267\n0.684841\n-0.684841\n-70.926521\n-1.409798\n-1.390716\n51.0\n0.031278\n0.000978\n1.391068\n-1.337275\n1.423652\n-1.423652\n-26.984289\n-0.529285\n-0.529104\n51.0\n0.005923\n0.000035\n0.529137\n-0.517306\n0.548614\n-0.548614\n56.133301\n0.789974\n1.100653\n51.0\n0.483293\n0.233572\n1.202085\n1.757948\n1.757948\n0.587645\n-45.478088\n-1.421026\n-0.891727\n51.0\n0.739772\n0.547262\n1.158637\n0.386504\n1.509923\n-1.509923\n-17.145500\n-0.387209\n-0.336186\n51.0\n0.127724\n0.016313\n0.359631\n-0.108017\n0.469502\n-0.4
69502\n-25.565697\n-0.502589\n-0.501288\n51.0\n0.016613\n0.000276\n0.501563\n-0.465890\n0.535957\n-0.535957\n-77.823814\n-1.519193\n-1.525957\n51.0\n0.028265\n0.000799\n1.526219\n-1.468685\n1.564035\n-1.564035\n-36.996349\n-0.727857\n-0.725419\n51.0\n0.007882\n0.000062\n0.725461\n-0.705429\n0.735931\n-0.735931\n58.453831\n0.779782\n1.146154\n51.0\n0.682839\n0.466269\n1.334143\n2.093526\n2.093526\n0.433132\n-52.249134\n-1.582441\n-1.024493\n51.0\n0.815997\n0.665852\n1.309747\n0.419112\n1.718448\n-1.718448\n-22.941730\n-0.449813\n-0.449838\n51.0\n0.179541\n0.032235\n0.484344\n-0.149044\n0.718254\n-0.718254\n2.0\n\n\n1\n-22.630985\n-0.572027\n-0.443745\n51.0\n0.548965\n0.301363\n0.705884\n0.844384\n1.284441\n-1.284441\n-43.583847\n-1.497262\n-0.854585\n51.0\n1.019406\n1.039188\n1.330227\n1.096710\n1.692231\n-1.692231\n-31.147585\n-0.398927\n-0.610737\n51.0\n0.360847\n0.130210\n0.709373\n-0.256470\n1.562690\n-1.562690\n35.902119\n0.479088\n0.703963\n51.0\n0.365270\n0.133422\n0.793087\n1.559241\n1.559241\n0.320613\n-26.475592\n-1.411000\n-0.519129\n51.0\n1.250764\n1.564410\n1.354218\n1.246734\n1.762012\n-1.762012\n-34.175106\n-0.647045\n-0.670100\n51.0\n0.273988\n0.075069\n0.723950\n-0.189910\n1.339854\n-1.339854\n-25.383873\n-0.599582\n-0.497723\n51.0\n0.304314\n0.092607\n0.583383\n0.198417\n0.860741\n-0.860741\n-23.190578\n-0.632231\n-0.454717\n51.0\n0.267774\n0.071703\n0.527703\n0.086810\n0.735903\n-0.735903\n-9.272245\n0.044390\n-0.181809\n51.0\n0.310334\n0.096307\n0.359668\n0.103880\n0.687320\n-0.687320\n33.800453\n0.639043\n0.662754\n51.0\n0.155807\n0.024276\n0.680822\n0.971393\n0.971393\n0.338048\n-20.350363\n-0.685427\n-0.399027\n51.0\n0.401511\n0.161211\n0.566068\n0.388883\n0.798647\n-0.798647\n-22.385489\n-0.310659\n-0.438931\n51.0\n0.202867\n0.041155\n0.483545\n-0.223507\n0.780465\n-0.780465\n-23.431786\n-0.607775\n-0.459447\n51.0\n0.502495\n0.252501\n0.680876\n0.557547\n1.097538\n-1.097538\n-35.239632\n-1.173477\n-0.690973\n51.0\n0.790543\n0.624958\n1.049953\n0.850109\n1.334061\n-1.334061\n-23.722792\n-0.263811\n-0.465153\n51.0\n0.304623\n0.092795\n0.556024\n-0.096469\n1.355410\n-1.355410\n36.532707\n0.576822\n0.716328\n51.0\n0.285204\n0.081341\n0.771016\n1.347809\n1.347809\n0.321302\n-23.286812\n-1.098079\n-0.456604\n51.0\n0.934937\n0.874106\n1.040478\n0.973006\n1.405902\n-1.405902\n-31.214712\n-0.552194\n-0.612053\n51.0\n0.146669\n0.021512\n0.629381\n-0.445301\n1.042179\n-1.042179\n-23.984716\n-0.675121\n-0.470289\n51.0\n0.554180\n0.307116\n0.726834\n0.807535\n1.182987\n-1.182987\n-39.006611\n-1.346193\n-0.764835\n51.0\n0.884366\n0.782103\n1.169220\n1.003716\n1.507433\n-1.507433\n-28.776979\n-0.436710\n-0.564255\n51.0\n0.328655\n0.108014\n0.652991\n-0.268483\n1.373182\n-1.373182\n33.181316\n0.482228\n0.650614\n51.0\n0.366002\n0.133957\n0.746496\n1.554717\n1.554717\n0.254808\n-25.558233\n-1.347874\n-0.501142\n51.0\n1.110295\n1.232755\n1.218153\n1.149943\n1.741076\n-1.741076\n-33.443592\n-0.659850\n-0.655757\n51.0\n0.246830\n0.060925\n0.700673\n-0.250632\n1.340311\n-1.340311\n6.0\n\n\n2\n-35.085182\n-0.687561\n-0.687945\n51.0\n0.030869\n0.000953\n0.688637\n-0.624283\n0.739868\n-0.739868\n-106.435272\n-2.126637\n-2.086966\n51.0\n0.089233\n0.007963\n2.088873\n-1.959578\n2.215342\n-2.215342\n-43.562584\n-0.854689\n-0.854168\n51.0\n0.032274\n0.001042\n0.854778\n-0.801072\n0.912399\n-0.912399\n73.886467\n1.213113\n1.448754\n51.0\n0.694268\n0.482008\n1.606517\n2.342902\n2.342902\n0.642018\n-67.402786\n-2.043274\n-1.321623\n51.0\n1.021520\n1.043502\n1.670386\n0.217647\n2.288412\n-2.288412\n
[… additional rows of the extracted-features table omitted: the flattened per-sample feature values (mean, std, min, max, etc. for each variable) are not readable in this text form …]
227\n0.598616\n-0.598616\n-67.415817\n-1.322900\n-1.321879\n51.0\n0.020617\n0.000425\n1.322040\n-1.252832\n1.356348\n-1.356348\n-14.509363\n-0.280158\n-0.284497\n51.0\n0.027159\n0.000738\n0.285791\n-0.237379\n0.326241\n-0.326241\n54.223145\n0.808409\n1.063199\n51.0\n0.487772\n0.237921\n1.169749\n1.747880\n1.747880\n0.582484\n-38.194836\n-1.286616\n-0.748918\n51.0\n0.720042\n0.518461\n1.038913\n0.589410\n1.341641\n-1.341641\n-24.306427\n-0.513999\n-0.476597\n51.0\n0.113742\n0.012937\n0.489981\n-0.236320\n0.606179\n-0.606179\n-20.204275\n-0.414832\n-0.396162\n51.0\n0.036496\n0.001332\n0.397840\n-0.328904\n0.434622\n-0.434622\n-71.983742\n-1.408678\n-1.411446\n51.0\n0.031827\n0.001013\n1.411805\n-1.346405\n1.477625\n-1.477625\n-23.991568\n-0.466880\n-0.470423\n51.0\n0.027061\n0.000732\n0.471201\n-0.430881\n0.532167\n-0.532167\n53.270744\n0.767735\n1.044524\n51.0\n0.671843\n0.451373\n1.241936\n2.002148\n2.002148\n0.376754\n-38.753330\n-1.399783\n-0.759869\n51.0\n0.882002\n0.777927\n1.164186\n0.885538\n1.513484\n-1.513484\n-31.401262\n-0.670306\n-0.615711\n51.0\n0.152118\n0.023140\n0.634224\n-0.286498\n0.862852\n-0.862852\n2.0\n\n\n\n\n\n\nx_cat, x_cont, yb = first(dls.train)\nx_cont[:10]\n\ntensor([[-0.8581, -0.1346, -0.8581, ..., 0.3235, 0.5841, -0.6917],\n [-0.0406, 0.0344, -0.0406, ..., -0.0171, -1.7253, 1.2745],\n [ 1.7966, -0.0497, 1.7966, ..., -0.6516, 0.9802, -1.0290],\n ...,\n [ 0.2804, 0.4747, 0.2804, ..., -0.4816, -0.3325, 0.0887],\n [-0.1472, -0.1884, -0.1472, ..., -0.1036, -1.2258, 0.8491],\n [-0.9960, -0.6116, -0.9960, ..., 0.4602, 3.3105, -3.0129]])", + "crumbs": [ + "Data", + "Featurizing Time Series" + ] + }, + { + "objectID": "models.resnet.html", + "href": "models.resnet.html", + "title": "ResNet", + "section": "", + "text": "This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co\n\n\nsource\n\nResNet\n\n ResNet (c_in, c_out)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nResBlock\n\n ResBlock (ni, nf, kss=[7, 5, 3])\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nxb = torch.rand(2, 3, 4)\ntest_eq(ResNet(3,2)(xb).shape, [xb.shape[0], 2])\ntest_eq(count_parameters(ResNet(3, 2)), 479490) # for (3,2)\n\n\nResNet(3,2)\n\nResNet(\n (resblock1): ResBlock(\n (convblock1): ConvBlock(\n (0): Conv1d(3, 64, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)\n (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock2): ConvBlock(\n (0): Conv1d(64, 64, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)\n (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock3): ConvBlock(\n (0): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (shortcut): ConvBlock(\n (0): Conv1d(3, 64, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (add): Add\n (act): ReLU()\n )\n (resblock2): ResBlock(\n (convblock1): ConvBlock(\n (0): Conv1d(64, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock2): ConvBlock(\n (0): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n (2): ReLU()\n )\n (convblock3): ConvBlock(\n (0): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (shortcut): ConvBlock(\n (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (add): Add\n (act): ReLU()\n )\n (resblock3): ResBlock(\n (convblock1): ConvBlock(\n (0): Conv1d(128, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock2): ConvBlock(\n (0): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock3): ConvBlock(\n (0): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (shortcut): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (add): Add\n (act): ReLU()\n )\n (gap): AdaptiveAvgPool1d(output_size=1)\n (squeeze): Squeeze(dim=-1)\n (fc): Linear(in_features=128, out_features=2, bias=True)\n)", + "crumbs": [ + "Models", + "CNNs", + "ResNet" + ] + }, + { + "objectID": "models.mwdn.html", + "href": "models.mwdn.html", + "title": "mWDN", + "section": "", + "text": "multilevel Wavelet Decomposition Network (mWDN)\n\nThis is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co\n\nsource\n\nWaveBlock\n\n WaveBlock (c_in, c_out, seq_len, wavelet=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nmWDNPlus\n\n mWDNPlus (c_in, c_out, seq_len, d=None, levels=3, wavelet=None,\n base_model=None, base_arch=<class\n 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. 
The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nmWDNBlocks\n\n mWDNBlocks (c_in, c_out, seq_len, levels=3, wavelet=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nmWDN\n\n mWDN (c_in, c_out, seq_len, levels=3, wavelet=None, base_arch=<class\n 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nfrom tsai.models.TSTPlus import TSTPlus\n\n\nbs = 16\nc_in = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, c_in, seq_len).to(default_device())\ntest_eq(mWDN(c_in, c_out, seq_len).to(xb.device)(xb).shape, [bs, c_out])\nmodel = mWDNPlus(c_in, c_out, seq_len, fc_dropout=.5)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nmodel = mWDNPlus(c_in, c_out, seq_len, base_arch=TSTPlus, fc_dropout=.5)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\n\n\nmodel.head, model.head_nf\n\n(Sequential(\n (0): GELU(approximate='none')\n (1): fastai.layers.Flatten(full=False)\n (2): LinBnDrop(\n (0): Dropout(p=0.5, inplace=False)\n (1): Linear(in_features=1536, out_features=2, bias=True)\n )\n ),\n 128)\n\n\n\nbs = 16\nc_in = 3\nseq_len = 12\nd = 10\nc_out = 2\nxb = torch.rand(bs, c_in, seq_len).to(default_device())\nmodel = mWDNPlus(c_in, c_out, seq_len, d=d)\ntest_eq(model.to(xb.device)(xb).shape, [bs, d, c_out])\n\n\nbs = 16\nc_in = 3\nseq_len = 12\nd = (5, 2)\nc_out = 2\nxb = torch.rand(bs, c_in, seq_len).to(default_device())\nmodel = mWDNPlus(c_in, c_out, seq_len, d=d)\ntest_eq(model.to(xb.device)(xb).shape, [bs, *d, c_out])", + "crumbs": [ + "Models", + "Wavelet-based NNs", + "mWDN" + ] + }, + { + "objectID": "models.fcn.html", + "href": "models.fcn.html", + "title": "FCN", + "section": "", + "text": "This is an unofficial PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:\n\nWang, Z., Yan, W., & Oates, T. (2017, May). Time series classification from scratch with deep neural networks: A strong baseline. In 2017 international joint conference on neural networks (IJCNN) (pp. 1578-1585). IEEE.\nFawaz, H. I., Forestier, G., Weber, J., Idoumghar, L., & Muller, P. A. (2019). Deep learning for time series classification: a review. 
Data Mining and Knowledge Discovery, 33(4), 917-963.\n\nOfficial FCN TensorFlow implementation: https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/fcn.py.\nNote: kernel filter size 8 has been replaced by 7 (since we believe it’s a bug).\n\nsource\n\nFCN\n\n FCN (c_in, c_out, layers=[128, 256, 128], kss=[7, 5, 3])\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 16\nnvars = 3\nseq_len = 128\nc_out = 2\nxb = torch.rand(bs, nvars, seq_len)\nmodel = FCN(nvars, c_out)\ntest_eq(model(xb).shape, (bs, c_out))\nmodel\n\nFCN(\n (convblock1): ConvBlock(\n (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock2): ConvBlock(\n (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)\n (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock3): ConvBlock(\n (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (gap): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Flatten(full=False)\n )\n (fc): Linear(in_features=128, out_features=2, bias=True)\n)", + "crumbs": [ + "Models", + "CNNs", + "FCN" + ] + }, + { + "objectID": "models.gmlp.html", + "href": "models.gmlp.html", + "title": "gMLP", + "section": "", + "text": "This is an unofficial PyTorch implementation based on:\n\nLiu, H., Dai, Z., So, D. R., & Le, Q. V. (2021). Pay Attention to MLPs. arXiv preprint arXiv:2105.08050.\nCholakov, R., & Kolev, T. (2022). The GatedTabTransformer. An enhanced deep learning architecture for tabular modeling. arXiv preprint arXiv:2201.00199.\n\n\nsource\n\ngMLP\n\n gMLP (c_in, c_out, seq_len, patch_size=1, d_model=256, d_ffn=512,\n depth=6)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. 
:vartype training: bool\n\nbs = 16\nc_in = 3\nc_out = 2\nseq_len = 64\npatch_size = 4\nxb = torch.rand(bs, c_in, seq_len)\nmodel = gMLP(c_in, c_out, seq_len, patch_size=patch_size)\ntest_eq(model(xb).shape, (bs, c_out))", + "crumbs": [ + "Models", + "MLPs", + "gMLP" + ] + }, + { + "objectID": "models.transformerrnnplus.html", + "href": "models.transformerrnnplus.html", + "title": "TransformerRNNPlus", + "section": "", + "text": "This is a PyTorch implementation of a Transformer + RNN created by Ignacio Oguiza - oguiza@timeseriesAI.co, inspired by the code created by Baurzhan Urazalinov (https://www.kaggle.com/baurzhanurazalinov).\nBaurzhan Urazalinov won a Kaggle competition (Parkinson’s Freezing of Gait Prediction: Event detection from wearable sensor data - 2023) using the following original TensorFlow code:\n\nhttps://www.kaggle.com/code/baurzhanurazalinov/parkinson-s-freezing-defog-training-code\nhttps://www.kaggle.com/code/baurzhanurazalinov/parkinson-s-freezing-tdcsfog-training-code\nhttps://www.kaggle.com/code/baurzhanurazalinov/parkinson-s-freezing-submission-code\n\nI’d like to congratulate Baurzhan for winning this competition, and for sharing the code he used.\n\nfrom tsai.models.utils import count_parameters\n\n\nt = torch.rand(4, 864, 54)\nencoder_layer = torch.nn.TransformerEncoderLayer(54, 6, dim_feedforward=2048, dropout=0.1, \n                                                 activation=\"relu\", layer_norm_eps=1e-05, \n                                                 batch_first=True, norm_first=False)\nprint(encoder_layer(t).shape)\nprint(count_parameters(encoder_layer))\n\ntorch.Size([4, 864, 54])\n235382\n\n\n\nbs = 4\nc_in = 5\nseq_len = 50\n\nencoder = _TransformerRNNEncoder(nn.LSTM, c_in=c_in, seq_len=seq_len, d_model=128, nhead=4, num_encoder_layers=1, dim_feedforward=None, proj_dropout=0.1, dropout=0.1, num_rnn_layers=3, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nprint(encoder(t).shape)\n\ntorch.Size([4, 1024, 50])\n\n\n\nsource\n\nTransformerGRUPlus\n\n TransformerGRUPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,\n                     d_model:int=128, nhead:int=16,\n                     proj_dropout:float=0.1, num_encoder_layers:int=1,\n                     dim_feedforward:int=2048, dropout:float=0.1,\n                     num_rnn_layers:int=1, bidirectional:bool=True,\n                     custom_head=None, **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! 
On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nNumber of channels in the input tensor.\n\n\nc_out\nint\n\nNumber of output channels.\n\n\nseq_len\nint\n\nNumber of time steps in the input tensor.\n\n\nd\ntuple\nNone\nint or tuple with shape of the output tensor\n\n\nd_model\nint\n128\nTotal dimension of the model.\n\n\nnhead\nint\n16\nNumber of parallel attention heads (d_model will be split across nhead - each head will have dimension d_model // nhead).\n\n\nproj_dropout\nfloat\n0.1\nDropout probability after the first linear layer. Default: 0.1.\n\n\nnum_encoder_layers\nint\n1\nNumber of transformer encoder layers. Default: 1.\n\n\ndim_feedforward\nint\n2048\nThe dimension of the feedforward network model. Default: 2048.\n\n\ndropout\nfloat\n0.1\nTransformer encoder layers dropout. Default: 0.1.\n\n\nnum_rnn_layers\nint\n1\nNumber of RNN layers in the encoder. Default: 1.\n\n\nbidirectional\nbool\nTrue\nIf True, becomes a bidirectional RNN. Default: True.\n\n\ncustom_head\nNoneType\nNone\nCustom head that will be applied to the model. If None, a head with c_out outputs will be used. Default: None.\n\n\nkwargs\n\n\n\n\n\n\n\nsource\n\n\nTransformerLSTMPlus\n\n TransformerLSTMPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,\n d_model:int=128, nhead:int=16,\n proj_dropout:float=0.1, num_encoder_layers:int=1,\n dim_feedforward:int=2048, dropout:float=0.1,\n num_rnn_layers:int=1, bidirectional:bool=True,\n custom_head=None, **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! 
On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nNumber of channels in the input tensor.\n\n\nc_out\nint\n\nNumber of output channels.\n\n\nseq_len\nint\n\nNumber of time steps in the input tensor.\n\n\nd\ntuple\nNone\nint or tuple with shape of the output tensor\n\n\nd_model\nint\n128\nTotal dimension of the model.\n\n\nnhead\nint\n16\nNumber of parallel attention heads (d_model will be split across nhead - each head will have dimension d_model // nhead).\n\n\nproj_dropout\nfloat\n0.1\nDropout probability after the first linear layer. Default: 0.1.\n\n\nnum_encoder_layers\nint\n1\nNumber of transformer encoder layers. Default: 1.\n\n\ndim_feedforward\nint\n2048\nThe dimension of the feedforward network model. Default: 2048.\n\n\ndropout\nfloat\n0.1\nTransformer encoder layers dropout. Default: 0.1.\n\n\nnum_rnn_layers\nint\n1\nNumber of RNN layers in the encoder. Default: 1.\n\n\nbidirectional\nbool\nTrue\nIf True, becomes a bidirectional RNN. Default: True.\n\n\ncustom_head\nNoneType\nNone\nCustom head that will be applied to the model. If None, a head with c_out outputs will be used. Default: None.\n\n\nkwargs\n\n\n\n\n\n\n\nsource\n\n\nTransformerRNNPlus\n\n TransformerRNNPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,\n d_model:int=128, nhead:int=16,\n proj_dropout:float=0.1, num_encoder_layers:int=1,\n dim_feedforward:int=2048, dropout:float=0.1,\n num_rnn_layers:int=1, bidirectional:bool=True,\n custom_head=None, **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! 
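A practical note on the nhead argument listed in the tables above and below: because d_model is split across the attention heads, d_model must be an integer multiple of nhead. This follows the standard PyTorch multi-head attention constraint; the snippet below is only an illustrative sketch of that rule, not code from this page.

d_model, nhead = 128, 16          # defaults shown in the argument tables
assert d_model % nhead == 0, "pick nhead so that it divides d_model"
head_dim = d_model // nhead       # 8: the per-head dimension mentioned in the tables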
On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nNumber of channels in the input tensor.\n\n\nc_out\nint\n\nNumber of output channels.\n\n\nseq_len\nint\n\nNumber of time steps in the input tensor.\n\n\nd\ntuple\nNone\nint or tuple with shape of the output tensor\n\n\nd_model\nint\n128\nTotal dimension of the model.\n\n\nnhead\nint\n16\nNumber of parallel attention heads (d_model will be split across nhead - each head will have dimension d_model // nhead).\n\n\nproj_dropout\nfloat\n0.1\nDropout probability after the first linear layer. Default: 0.1.\n\n\nnum_encoder_layers\nint\n1\nNumber of transformer encoder layers. Default: 1.\n\n\ndim_feedforward\nint\n2048\nThe dimension of the feedforward network model. Default: 2048.\n\n\ndropout\nfloat\n0.1\nTransformer encoder layers dropout. Default: 0.1.\n\n\nnum_rnn_layers\nint\n1\nNumber of RNN layers in the encoder. Default: 1.\n\n\nbidirectional\nbool\nTrue\nIf True, becomes a bidirectional RNN. Default: True.\n\n\ncustom_head\nNoneType\nNone\nCustom head that will be applied to the model. If None, a head with c_out outputs will be used. 
Default: None.\n\n\nkwargs\n\n\n\n\n\n\n\nbs = 4\nc_in = 5\nc_out = 1\nseq_len = 50\nd = None\n\nmodel = TransformerRNNPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == torch.Size([4]) \nprint(model(t).shape)\n\nmodel = TransformerLSTMPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == torch.Size([4])\nprint(model(t).shape)\n\nmodel = TransformerGRUPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == torch.Size([4])\nprint(model(t).shape)\n\ntorch.Size([4])\ntorch.Size([4])\ntorch.Size([4])\n\n\n\nbs = 4\nc_in = 5\nc_out = 3\nseq_len = 50\nd = None\n\nmodel = TransformerRNNPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == (bs, c_out)\nprint(model(t).shape)\n\nmodel = TransformerLSTMPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == (bs, c_out)\nprint(model(t).shape)\n\nmodel = TransformerGRUPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == (bs, c_out)\nprint(model(t).shape)\n\ntorch.Size([4, 3])\ntorch.Size([4, 3])\ntorch.Size([4, 3])\n\n\n\nbs = 4\nc_in = 5\nc_out = 3\nseq_len = 50\nd = 50\n\nmodel = TransformerRNNPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == (bs, d, c_out)\nprint(model(t).shape)\n\nmodel = TransformerLSTMPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == (bs, d, c_out)\nprint(model(t).shape)\n\nmodel = TransformerGRUPlus(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, proj_dropout=0.1, d_model=128, nhead=4, num_encoder_layers=2, dropout=0.1, num_rnn_layers=1, bidirectional=True)\nt = torch.randn(bs, c_in, seq_len)\nassert model(t).shape == (bs, d, c_out)\nprint(model(t).shape)\n\ntorch.Size([4, 50, 3])\ntorch.Size([4, 50, 3])\ntorch.Size([4, 50, 3])", + "crumbs": [ + "Models", + "Hybrid models", + "TransformerRNNPlus" + ] + }, + { + "objectID": "models.rnn_fcnplus.html", + "href": "models.rnn_fcnplus.html", + "title": "RNN_FCNPlus", + "section": "", + "text": "This is an unofficial PyTorch implementation by Ignacio Oguiza - oguiza@timeseriesAI.co\n\n\nsource\n\nMGRU_FCNPlus\n\n MGRU_FCNPlus (*args, se=16, **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. 
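Before moving on, here is a hypothetical end-to-end training sketch that combines the TransformerRNNPlus family shown above with the TSClassifier API used elsewhere in these docs. The arch/arch_config arguments, the tsai.models.TransformerRNNPlus import path, and the NATOPS dataset are assumptions for illustration, not part of this page.

from tsai.data.all import *
from tsai.tslearner import *
from tsai.models.TransformerRNNPlus import TransformerLSTMPlus  # import path assumed

X, y, splits = get_UCR_data('NATOPS', return_split=False)  # small example dataset
X = X.astype(np.float32)
tfms = [None, TSClassification]
learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=[TSStandardize()],
                     arch=TransformerLSTMPlus, arch_config=dict(d_model=64, nhead=4))
learn.fit_one_cycle(1, 1e-3)

TransformerRNNPlus or TransformerGRUPlus could be swapped in through the same arch argument.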
The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nMLSTM_FCNPlus\n\n MLSTM_FCNPlus (*args, se=16, **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. 
Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nMRNN_FCNPlus\n\n MRNN_FCNPlus (*args, se=16, **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nGRU_FCNPlus\n\n GRU_FCNPlus (c_in, c_out, seq_len=None, d=None, hidden_size=100,\n rnn_layers=1, bias=True, cell_dropout=0, rnn_dropout=0.8,\n bidirectional=False, shuffle=True, fc_dropout=0.0,\n use_bn=False, conv_layers=[128, 256, 128], kss=[7, 5, 3],\n se=0, custom_head=None)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nLSTM_FCNPlus\n\n LSTM_FCNPlus (c_in, c_out, seq_len=None, d=None, hidden_size=100,\n rnn_layers=1, bias=True, cell_dropout=0, rnn_dropout=0.8,\n bidirectional=False, shuffle=True, fc_dropout=0.0,\n use_bn=False, conv_layers=[128, 256, 128], kss=[7, 5, 3],\n se=0, custom_head=None)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. 
The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nRNN_FCNPlus\n\n RNN_FCNPlus (c_in, c_out, seq_len=None, d=None, hidden_size=100,\n rnn_layers=1, bias=True, cell_dropout=0, rnn_dropout=0.8,\n bidirectional=False, shuffle=True, fc_dropout=0.0,\n use_bn=False, conv_layers=[128, 256, 128], kss=[7, 5, 3],\n se=0, custom_head=None)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
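One detail worth spelling out before the usage examples below: with the defaults hidden_size=100 and conv_layers=[128, 256, 128] (and bidirectional=False), the backbone concatenates the RNN output (100 features) with the GAP-pooled output of the last conv block (128 features). That is why the custom_head in the example below expects 228 input features, and why the LSTM_FCNPlus repr further down shows in_features=228. A minimal hypothetical head built on that assumption:

import torch.nn as nn

c_out = 2
# 100 (RNN hidden) + 128 (last conv block after GAP) = 228 input features;
# with bidirectional=True the RNN part doubles to 200, giving 328 instead.
custom_head = nn.Sequential(nn.Dropout(0.3), nn.Linear(100 + 128, c_out))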
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nfrom tsai.models.utils import count_parameters\nfrom tsai.models.RNN_FCN import *\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\ntest_eq(RNN_FCNPlus(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])\ntest_eq(LSTM_FCNPlus(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])\ntest_eq(MLSTM_FCNPlus(n_vars, c_out, seq_len)(xb).shape, [bs, c_out])\ntest_eq(GRU_FCNPlus(n_vars, c_out, shuffle=False)(xb).shape, [bs, c_out])\ntest_eq(GRU_FCNPlus(n_vars, c_out, seq_len, shuffle=False)(xb).shape, [bs, c_out])\ntest_eq(count_parameters(LSTM_FCNPlus(n_vars, c_out, seq_len)), count_parameters(LSTM_FCN(n_vars, c_out, seq_len)))\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\ncustom_head = nn.Linear(228, c_out)\ntest_eq(RNN_FCNPlus(n_vars, c_out, seq_len, custom_head=custom_head)(xb).shape, [bs, c_out])\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nd = 10\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\ntest_eq(RNN_FCNPlus(n_vars, c_out, seq_len, d=d)(xb).shape, [bs, d, c_out])\n\n\nbs = 16\nn_vars = 3\nseq_len = 12\nd = (5, 3)\nc_out = 2\nxb = torch.rand(bs, n_vars, seq_len)\ntest_eq(RNN_FCNPlus(n_vars, c_out, seq_len, d=d)(xb).shape, [bs, *d, c_out])\n\n\nLSTM_FCNPlus(n_vars, seq_len, c_out, se=8)\n\nLSTM_FCNPlus(\n (backbone): _RNN_FCN_Base_Backbone(\n (rnn): LSTM(2, 100, batch_first=True)\n (rnn_dropout): Dropout(p=0.8, inplace=False)\n (convblock1): ConvBlock(\n (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (se1): SqueezeExciteBlock(\n (avg_pool): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n (fc): Sequential(\n (0): Linear(in_features=128, out_features=16, bias=False)\n (1): ReLU()\n (2): Linear(in_features=16, out_features=128, bias=False)\n (3): Sigmoid()\n )\n )\n (convblock2): ConvBlock(\n (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)\n (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (se2): SqueezeExciteBlock(\n (avg_pool): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n (fc): Sequential(\n (0): Linear(in_features=256, out_features=32, bias=False)\n (1): ReLU()\n (2): Linear(in_features=32, out_features=256, bias=False)\n (3): Sigmoid()\n )\n )\n (convblock3): ConvBlock(\n (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (gap): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n (concat): Concat(dim=1)\n )\n (head): Sequential(\n (0): Linear(in_features=228, out_features=12, bias=True)\n )\n)", + "crumbs": [ + "Models", + "Hybrid models", + "RNN_FCNPlus" + ] + }, + { + "objectID": "models.explainability.html", + "href": "models.explainability.html", + "title": "Explainability", + "section": "", + "text": "Functionality to help with both global and local explainability.\n\n\nsource\n\nget_attribution_map\n\n get_attribution_map (model, modules, x, y=None, detach=True, cpu=False,\n apply_relu=True)\n\n\nsource\n\n\nget_acts_and_grads\n\n get_acts_and_grads (model, 
modules, x, y=None, detach=True, cpu=False)\n\nReturns activations and gradients for given modules in a model and a single input or a batch. Gradients require y value(s). If they are not provided, it will use the predictions.", + "crumbs": [ + "Explainability" + ] + }, + { + "objectID": "callback.noisy_student.html", + "href": "callback.noisy_student.html", + "title": "Noisy student", + "section": "", + "text": "Callback to apply noisy student self-training (a semi-supervised learning approach) based on:\nXie, Q., Luong, M. T., Hovy, E., & Le, Q. V. (2020). Self-training with noisy student improves imagenet classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10687-10698).\n\nsource\n\nNoisyStudent\n\n NoisyStudent (dl2:fastai.data.load.DataLoader, bs:Optional[int]=None,\n l2pl_ratio:int=1, batch_tfms:Optional[list]=None,\n do_setup:bool=True, pseudolabel_sample_weight:float=1.0,\n verbose=False)\n\nA callback to implement the Noisy Student approach. In the original paper this was used in combination with noise: - stochastic depth: .8 - RandAugment: N=2, M=27 - dropout: .5\nSteps: 1. Build the dl you will use as a teacher 2. Create dl2 with the pseudolabels (either soft or hard preds) 3. Pass any required batch_tfms to the callback\n\nfrom tsai.data.all import *\nfrom tsai.models.all import *\nfrom tsai.tslearner import *\n\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, return_split=False)\nX = X.astype(np.float32)\n\n\npseudolabeled_data = X\nsoft_preds = True\n\npseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)\ndsets2 = TSDatasets(pseudolabeled_data, pseudolabels)\ndl2 = TSDataLoader(dsets2, num_workers=0)\nnoisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)\ntfms = [None, TSClassification]\nlearn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=[TSStandardize(), TSRandomSize(.5)], cbs=noisy_student_cb)\nlearn.fit_one_cycle(1)\n\nlabels / pseudolabels per training batch : 171 / 85\nrelative labeled/ pseudolabel sample weight in dataset: 4.0\n\nX: torch.Size([171, 24, 51]) X2: torch.Size([85, 24, 51]) X_comb: torch.Size([256, 24, 41])\ny: torch.Size([171]) y2: torch.Size([85]) y_comb: torch.Size([256])\n\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n1.782144\n1.758471\n0.250000\n00:00\n\n\n\n\n\n\npseudolabeled_data = X\nsoft_preds = False\n\npseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)\npseudolabels = pseudolabels.astype(np.float32)\ndsets2 = TSDatasets(pseudolabeled_data, pseudolabels)\ndl2 = TSDataLoader(dsets2, num_workers=0)\nnoisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)\ntfms = [None, TSClassification]\nlearn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=[TSStandardize(), TSRandomSize(.5)], cbs=noisy_student_cb)\nlearn.fit_one_cycle(1)\n\nlabels / pseudolabels per training batch : 171 / 85\nrelative labeled/ pseudolabel sample weight in dataset: 4.0\n\nX: torch.Size([171, 24, 51]) X2: torch.Size([85, 24, 51]) X_comb: torch.Size([256, 24, 51])\ny: torch.Size([171, 6]) y2: torch.Size([85, 6]) y_comb: torch.Size([256, 6])\n\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n1.898401\n1.841182\n0.155556\n00:00", + "crumbs": [ + "Training", + "Callbacks", + "Noisy student" + ] + }, + { + "objectID": "data.preparation.html", + "href": "data.preparation.html", + "title": "Data preparation", + "section": "", + "text": "Functions required to prepare X (and y) from a pandas 
dataframe.\n\n\nsource\n\napply_sliding_window\n\n apply_sliding_window (data, window_len:Union[int,list],\n horizon:Union[int,list]=0,\n x_vars:Union[int,list]=None,\n y_vars:Union[int,list]=None)\n\nApplies a sliding window on an array-like input to generate a 3d X (and optionally y)\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndata\n\n\nand array-like object with the input data\n\n\nwindow_len\nint | list\n\nsliding window length. When using a list, use negative numbers and 0.\n\n\nhorizon\nint | list\n0\nhorizon\n\n\nx_vars\nint | list\nNone\nindices of the independent variables\n\n\ny_vars\nint | list\nNone\nindices of the dependent variables (target). [] means no y will be created. None means all variables.\n\n\n\n\nsource\n\n\nprepare_sel_vars_and_steps\n\n prepare_sel_vars_and_steps (sel_vars=None, sel_steps=None, idxs=False)\n\n\nsource\n\n\nprepare_idxs\n\n prepare_idxs (o, shape=None)\n\n\ndata = np.arange(20).reshape(-1,1).repeat(3, 1) * np.array([1, 10, 100])\ndf = pd.DataFrame(data, columns=['feat_1', 'feat_2', 'feat_3'])\ndf.head()\n\n\n\n\n\n\n\n\nfeat_1\nfeat_2\nfeat_3\n\n\n\n\n0\n0\n0\n0\n\n\n1\n1\n10\n100\n\n\n2\n2\n20\n200\n\n\n3\n3\n30\n300\n\n\n4\n4\n40\n400\n\n\n\n\n\n\n\n\nwindow_len = 8\nhorizon = 1\nx_vars = None\ny_vars = None\nX, y = apply_sliding_window(data, window_len, horizon=horizon, x_vars=x_vars, y_vars=y_vars)\nprint(np.shares_memory(X, data))\nprint(np.shares_memory(y, data))\nprint(X.shape, y.shape)\ntest_eq(X.shape, (len(df) - (window_len - 1 + horizon), df.shape[1], window_len))\ntest_eq(y.shape, (len(df) - (window_len - 1 + horizon), df.shape[1]))\nX[0], y[0]\n\nTrue\nTrue\n(12, 3, 8) (12, 3)\n\n\n(array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 0, 10, 20, 30, 40, 50, 60, 70],\n [ 0, 100, 200, 300, 400, 500, 600, 700]]),\n array([ 8, 80, 800]))\n\n\n\nwindow_len = 8\nhorizon = 1\nx_vars = None\ny_vars = 0\nX, y = apply_sliding_window(df, window_len, horizon=horizon, x_vars=x_vars, y_vars=y_vars)\nprint(np.shares_memory(X, df))\nprint(np.shares_memory(y, df))\nprint(X.shape, y.shape)\ntest_eq(X.shape, (len(df) - (window_len - 1 + horizon), df.shape[1], window_len))\ntest_eq(y.shape, (len(df) - (window_len - 1 + horizon),))\nX[0], y[0]\n\nTrue\nTrue\n(12, 3, 8) (12,)\n\n\n(array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 0, 10, 20, 30, 40, 50, 60, 70],\n [ 0, 100, 200, 300, 400, 500, 600, 700]]),\n 8)\n\n\n\nwindow_len = 8\nhorizon = [1, 2]\nx_vars = 0\ny_vars = [1, 2]\nX, y = apply_sliding_window(df, window_len, horizon=horizon, x_vars=x_vars, y_vars=y_vars)\nprint(np.shares_memory(X, df))\nprint(np.shares_memory(y, df))\nprint(X.shape, y.shape)\ntest_eq(X.shape, (len(df) - (window_len - 1 + max(horizon)), 1, window_len))\ntest_eq(y.shape, (len(df) - (window_len - 1 + max(horizon)), len(y_vars), len(horizon)))\nX[0], y[0]\n\nTrue\nFalse\n(11, 1, 8) (11, 2, 2)\n\n\n(array([[0, 1, 2, 3, 4, 5, 6, 7]]),\n array([[ 80, 90],\n [800, 900]]))\n\n\n\nwindow_len = [-4, -2, -1, 0]\nhorizon = [1, 2, 4]\nx_vars = 0\ny_vars = [1, 2]\nX, y = apply_sliding_window(df, window_len, horizon=horizon, x_vars=x_vars, y_vars=y_vars)\nprint(np.shares_memory(X, df))\nprint(np.shares_memory(y, df))\nprint(X.shape, y.shape)\ntest_eq(X.shape, (12, 1, 4))\ntest_eq(y.shape, (12, 2, 3))\nX[0], y[0]\n\nFalse\nFalse\n(12, 1, 4) (12, 2, 3)\n\n\n(array([[0, 2, 3, 4]]),\n array([[ 50, 60, 80],\n [500, 600, 800]]))\n\n\n\nsource\n\n\ndf2Xy\n\n df2Xy (df, sample_col=None, feat_col=None, data_cols=None,\n target_col=None, steps_in_rows=False, to3d=True, splits=None,\n sort_by=None, ascending=True, 
y_func=None, return_names=False)\n\nThis function allows you to transform a pandas dataframe into X and y numpy arrays that can be used to create a TSDataset. sample_col: column that uniquely identifies each sample. feat_col: used for multivariate datasets. It indicates the column that identifies the feature in each row. data_cols: indicates the column/s where the data is located. If None, all columns are used (except the sample_col, feat_col, and target_col). target_col: indicates the column/s where the target is. steps_in_rows: flag to indicate if each step is in a different row or in a different column (default). to3d: turns X to 3d (including univariate time series). sort_by: used to pass any column/s needed to sort the steps in the sequence. If you pass a sample_col and/or feat_col, these will be automatically used before the sort_by column/s, and you don’t need to add them to the sort_by column/s list. y_func: function used to calculate y for each sample (and target_col). return_names: flag to return the names of the columns from where X was generated\n\nsource\n\n\nsplit_Xy\n\n split_Xy (X, y=None, splits=None)\n\n\ndf = pd.DataFrame()\ndf['sample_id'] = np.array([1,1,1,2,2,2,3,3,3])\ndf['var1'] = df['sample_id'] * 10 + df.index.values\ndf['var2'] = df['sample_id'] * 100 + df.index.values\ndf\n\n\n\n\n\n\n\n\nsample_id\nvar1\nvar2\n\n\n\n\n0\n1\n10\n100\n\n\n1\n1\n11\n101\n\n\n2\n1\n12\n102\n\n\n3\n2\n23\n203\n\n\n4\n2\n24\n204\n\n\n5\n2\n25\n205\n\n\n6\n3\n36\n306\n\n\n7\n3\n37\n307\n\n\n8\n3\n38\n308\n\n\n\n\n\n\n\n\nX_df, y_df = df2Xy(df, sample_col='sample_id', steps_in_rows=True)\ntest_eq(X_df[0], np.array([[10, 11, 12], [100, 101, 102]]))\n\n\nn_samples = 1_000\nn_rows = 10_000\n\nsample_ids = np.arange(n_samples).repeat(n_rows//n_samples).reshape(-1,1)\nfeat_ids = np.tile(np.arange(n_rows // n_samples), n_samples).reshape(-1,1)\ncont = np.random.randn(n_rows, 6)\nind_cat = np.random.randint(0, 3, (n_rows, 1))\ntarget = np.array([0,1,2])[ind_cat]\nind_cat2 = np.random.randint(0, 3, (n_rows, 1))\ntarget2 = np.array([100,200,300])[ind_cat2]\ndata = np.concatenate([sample_ids, feat_ids, cont, target, target], -1)\ncolumns = ['sample_id', 'feat_id'] + (np.arange(6) + 1).astype(str).tolist() + ['target'] + ['target2']\ndf = pd.DataFrame(data, columns=columns)\nidx = random_choice(np.arange(len(df)), len(df), False)\nnew_dtypes = {'sample_id':np.int32, 'feat_id':np.int32, '1':np.float32, '2':np.float32, '3':np.float32, '4':np.float32, '5':np.float32, '6':np.float32}\ndf = df.astype(dtype=new_dtypes)\ndf = 
df.loc[idx].reset_index(drop=True)\ndf\n\n\n\n\n\n\n\n\nsample_id\nfeat_id\n1\n2\n3\n4\n5\n6\ntarget\ntarget2\n\n\n\n\n0\n625\n2\n-1.390549\n0.770179\n-0.848480\n0.853631\n-0.309984\n0.874338\n2.0\n2.0\n\n\n1\n526\n4\n1.152397\n2.064397\n-0.392603\n-0.275797\n-0.047526\n-2.248814\n2.0\n2.0\n\n\n2\n397\n6\n-1.052930\n0.631396\n-0.758800\n-0.606483\n-2.776054\n-0.457755\n1.0\n1.0\n\n\n3\n528\n8\n-0.178637\n-1.253319\n-1.154014\n0.913876\n1.051010\n-0.635762\n1.0\n1.0\n\n\n4\n249\n2\n0.612595\n0.888297\n0.065024\n1.621935\n-0.180479\n0.309977\n1.0\n1.0\n\n\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n...\n\n\n9995\n272\n1\n-0.432325\n1.645262\n1.502872\n-1.144859\n0.919653\n0.414304\n0.0\n0.0\n\n\n9996\n920\n5\n-0.724702\n-1.471832\n1.209086\n1.206532\n0.555676\n0.352726\n2.0\n2.0\n\n\n9997\n662\n6\n1.122043\n-0.379357\n-0.344517\n-1.545091\n0.187894\n1.062510\n2.0\n2.0\n\n\n9998\n71\n7\n-0.053582\n-0.854992\n-1.118632\n-1.967820\n-0.344804\n0.128105\n0.0\n0.0\n\n\n9999\n407\n4\n-1.565716\n-0.947183\n-0.401944\n-1.309024\n-0.237755\n-0.743251\n2.0\n2.0\n\n\n\n\n10000 rows × 10 columns\n\n\n\n\nfrom scipy.stats import mode\n\n\ndef y_func(o): return mode(o, axis=1, keepdims=True).mode\nX, y = df2xy(df, sample_col='sample_id', feat_col='feat_id', target_col=['target', 'target2'], sort_by=['sample_id', 'feat_id'], y_func=y_func)\ntest_eq(X.shape, (1000, 10, 6))\ntest_eq(y.shape, (1000, 2))\nrand_idx = np.random.randint(0, np.max(df.sample_id))\nsorted_df = df.sort_values(by=['sample_id', 'feat_id'], kind='stable').reset_index(drop=True)\ntest_eq(X[rand_idx], sorted_df[sorted_df.sample_id == rand_idx][['1', '2', '3', '4', '5', '6']].values)\ntest_eq(np.squeeze(mode(sorted_df[sorted_df.sample_id == rand_idx][['target', 'target2']].values).mode), y[rand_idx])\n\n\n# Univariate\nfrom io import StringIO\n\n\nTESTDATA = StringIO(\"\"\"sample_id;value_0;value_1;target\n rob;2;3;0\n alice;6;7;1\n eve;11;12;2\n \"\"\")\n\ndf = pd.read_csv(TESTDATA, sep=\";\")\ndisplay(df)\nX, y = df2Xy(df, sample_col='sample_id', target_col='target', data_cols=['value_0', 'value_1'], sort_by='sample_id')\ntest_eq(X.shape, (3, 1, 2))\ntest_eq(y.shape, (3,))\nX, y\n\n\n\n\n\n\n\n\nsample_id\nvalue_0\nvalue_1\ntarget\n\n\n\n\n0\nrob\n2\n3\n0\n\n\n1\nalice\n6\n7\n1\n\n\n2\neve\n11\n12\n2\n\n\n\n\n\n\n\n(array([[[ 6, 7]],\n \n [[11, 12]],\n \n [[ 2, 3]]]),\n array([1, 2, 0]))\n\n\n\n# Univariate\nTESTDATA = StringIO(\"\"\"sample_id;timestep;values;target\n rob;1;2;0\n alice;1;6;1\n eve;1;11;2\n \n rob;2;3;0\n alice;2;7;1\n eve;2;12;2\n \"\"\")\n\ndf = pd.read_csv(TESTDATA, sep=\";\")\ndisplay(df)\ndef y_func(o): return mode(o, axis=1).mode\nX, y = df2xy(df, sample_col='sample_id', target_col='target', data_cols=['values'], sort_by='timestep', to3d=True, y_func=y_func)\ntest_eq(X.shape, (3, 1, 2))\ntest_eq(y.shape, (3, ))\nprint(X, y)\n\n\n\n\n\n\n\n\nsample_id\ntimestep\nvalues\ntarget\n\n\n\n\n0\nrob\n1\n2\n0\n\n\n1\nalice\n1\n6\n1\n\n\n2\neve\n1\n11\n2\n\n\n3\nrob\n2\n3\n0\n\n\n4\nalice\n2\n7\n1\n\n\n5\neve\n2\n12\n2\n\n\n\n\n\n\n\n[[[ 6 7]]\n\n [[11 12]]\n\n [[ 2 3]]] [1 2 0]\n\n\n\n# Multivariate\nTESTDATA = StringIO(\"\"\"sample_id;trait;value_0;value_1;target\n rob;green;2;3;0\n rob;yellow;3;4;0\n rob;blue;4;5;0\n rob;red;5;6;0\n alice;green;6;7;1\n alice;yellow;7;8;1\n alice;blue;8;9;1\n alice;red;9;10;1\n eve;yellow;11;12;2\n eve;green;10;11;2\n eve;blue;12;12;2\n eve;red;13;14;2\n \"\"\")\n\ndf = pd.read_csv(TESTDATA, sep=\";\")\nidx = random_choice(len(df), len(df), False)\ndf = df.iloc[idx]\ndisplay(df)\ndef 
y_func(o): return mode(o, axis=1).mode\nX, y = df2xy(df, sample_col='sample_id', feat_col='trait', target_col='target', data_cols=['value_0', 'value_1'], y_func=y_func)\nprint(X, y)\ntest_eq(X.shape, (3, 4, 2))\ntest_eq(y.shape, (3,))\n\n\n\n\n\n\n\n\nsample_id\ntrait\nvalue_0\nvalue_1\ntarget\n\n\n\n\n9\neve\ngreen\n10\n11\n2\n\n\n10\neve\nblue\n12\n12\n2\n\n\n3\nrob\nred\n5\n6\n0\n\n\n0\nrob\ngreen\n2\n3\n0\n\n\n6\nalice\nblue\n8\n9\n1\n\n\n2\nrob\nblue\n4\n5\n0\n\n\n1\nrob\nyellow\n3\n4\n0\n\n\n4\nalice\ngreen\n6\n7\n1\n\n\n7\nalice\nred\n9\n10\n1\n\n\n8\neve\nyellow\n11\n12\n2\n\n\n11\neve\nred\n13\n14\n2\n\n\n5\nalice\nyellow\n7\n8\n1\n\n\n\n\n\n\n\n[[[ 8 9]\n [ 6 7]\n [ 9 10]\n [ 7 8]]\n\n [[12 12]\n [10 11]\n [13 14]\n [11 12]]\n\n [[ 4 5]\n [ 2 3]\n [ 5 6]\n [ 3 4]]] [1 2 0]\n\n\n\n# Multivariate, multi-label\nTESTDATA = StringIO(\"\"\"sample_id;trait;value_0;value_1;target1;target2\n rob;green;2;3;0;0\n rob;yellow;3;4;0;0\n rob;blue;4;5;0;0\n rob;red;5;6;0;0\n alice;green;6;7;1;0\n alice;yellow;7;8;1;0\n alice;blue;8;9;1;0\n alice;red;9;10;1;0\n eve;yellow;11;12;2;1\n eve;green;10;11;2;1\n eve;blue;12;12;2;1\n eve;red;13;14;2;1\n \"\"\")\n\ndf = pd.read_csv(TESTDATA, sep=\";\")\ndisplay(df)\ndef y_func(o): return mode(o, axis=1, keepdims=True).mode\nX, y = df2xy(df, sample_col='sample_id', feat_col='trait', target_col=['target1', 'target2'], data_cols=['value_0', 'value_1'], y_func=y_func)\ntest_eq(X.shape, (3, 4, 2))\ntest_eq(y.shape, (3, 2))\nprint(X, y)\n\n\n\n\n\n\n\n\nsample_id\ntrait\nvalue_0\nvalue_1\ntarget1\ntarget2\n\n\n\n\n0\nrob\ngreen\n2\n3\n0\n0\n\n\n1\nrob\nyellow\n3\n4\n0\n0\n\n\n2\nrob\nblue\n4\n5\n0\n0\n\n\n3\nrob\nred\n5\n6\n0\n0\n\n\n4\nalice\ngreen\n6\n7\n1\n0\n\n\n5\nalice\nyellow\n7\n8\n1\n0\n\n\n6\nalice\nblue\n8\n9\n1\n0\n\n\n7\nalice\nred\n9\n10\n1\n0\n\n\n8\neve\nyellow\n11\n12\n2\n1\n\n\n9\neve\ngreen\n10\n11\n2\n1\n\n\n10\neve\nblue\n12\n12\n2\n1\n\n\n11\neve\nred\n13\n14\n2\n1\n\n\n\n\n\n\n\n[[[ 8 9]\n [ 6 7]\n [ 9 10]\n [ 7 8]]\n\n [[12 12]\n [10 11]\n [13 14]\n [11 12]]\n\n [[ 4 5]\n [ 2 3]\n [ 5 6]\n [ 3 4]]] [[1 0]\n [2 1]\n [0 0]]\n\n\n\n# Multivariate, unlabeled\nTESTDATA = StringIO(\"\"\"sample_id;trait;value_0;value_1;target\n rob;green;2;3;0\n rob;yellow;3;4;0\n rob;blue;4;5;0\n rob;red;5;6;0\n alice;green;6;7;1\n alice;yellow;7;8;1\n alice;blue;8;9;1\n alice;red;9;10;1\n eve;yellow;11;12;2\n eve;green;10;11;2\n eve;blue;12;12;2\n eve;red;13;14;2\n \"\"\")\n\ndf = pd.read_csv(TESTDATA, sep=\";\")\nidx = random_choice(len(df), len(df), False)\ndf = df.iloc[idx]\ndisplay(df)\ndef y_func(o): return mode(o, axis=1, keepdims=True).mode\nX, y = df2xy(df, sample_col='sample_id', feat_col='trait', data_cols=['value_0', 'value_1'], y_func=y_func)\nprint(X, y)\ntest_eq(X.shape, (3, 4, 2))\ntest_eq(y, None)\n\n\n\n\n\n\n\n\nsample_id\ntrait\nvalue_0\nvalue_1\ntarget\n\n\n\n\n11\neve\nred\n13\n14\n2\n\n\n3\nrob\nred\n5\n6\n0\n\n\n9\neve\ngreen\n10\n11\n2\n\n\n10\neve\nblue\n12\n12\n2\n\n\n6\nalice\nblue\n8\n9\n1\n\n\n1\nrob\nyellow\n3\n4\n0\n\n\n4\nalice\ngreen\n6\n7\n1\n\n\n2\nrob\nblue\n4\n5\n0\n\n\n0\nrob\ngreen\n2\n3\n0\n\n\n8\neve\nyellow\n11\n12\n2\n\n\n7\nalice\nred\n9\n10\n1\n\n\n5\nalice\nyellow\n7\n8\n1\n\n\n\n\n\n\n\n[[[ 8 9]\n [ 6 7]\n [ 9 10]\n [ 7 8]]\n\n [[12 12]\n [10 11]\n [13 14]\n [11 12]]\n\n [[ 4 5]\n [ 2 3]\n [ 5 6]\n [ 3 4]]] None\n\n\n\nTESTDATA = StringIO(\"\"\"sample_id;trait;timestep;values;target\n rob;green;1;2;0\n rob;yellow;1;3;0\n rob;blue;1;4;0\n rob;red;1;5;0\n alice;green;1;6;1\n alice;yellow;1;7;1\n alice;blue;1;8;1\n 
alice;red;1;9;1\n eve;yellow;1;11;2\n eve;green;1;10;2\n eve;blue;1;12;2\n eve;red;1;13;2\n \n rob;green;2;3;0\n rob;yellow;2;4;0\n rob;blue;2;5;0\n rob;red;2;6;0\n alice;green;2;7;1\n alice;yellow;2;8;1\n alice;blue;2;9;1\n alice;red;2;10;1\n eve;yellow;2;12;2\n eve;green;2;11;2\n eve;blue;2;13;2\n eve;red;2;14;2\n \"\"\")\n\ndf = pd.read_csv(TESTDATA, sep=\";\")\ndisplay(df)\ndef y_func(o): return mode(o, axis=1).mode\nX, y = df2xy(df, sample_col='sample_id', feat_col='trait', sort_by='timestep', target_col='target', data_cols=['values'], y_func=y_func)\nprint(X, y)\ntest_eq(X.shape, (3, 4, 2))\ntest_eq(y.shape, (3, ))\n\n\n\n\n\n\n\n\nsample_id\ntrait\ntimestep\nvalues\ntarget\n\n\n\n\n0\nrob\ngreen\n1\n2\n0\n\n\n1\nrob\nyellow\n1\n3\n0\n\n\n2\nrob\nblue\n1\n4\n0\n\n\n3\nrob\nred\n1\n5\n0\n\n\n4\nalice\ngreen\n1\n6\n1\n\n\n5\nalice\nyellow\n1\n7\n1\n\n\n6\nalice\nblue\n1\n8\n1\n\n\n7\nalice\nred\n1\n9\n1\n\n\n8\neve\nyellow\n1\n11\n2\n\n\n9\neve\ngreen\n1\n10\n2\n\n\n10\neve\nblue\n1\n12\n2\n\n\n11\neve\nred\n1\n13\n2\n\n\n12\nrob\ngreen\n2\n3\n0\n\n\n13\nrob\nyellow\n2\n4\n0\n\n\n14\nrob\nblue\n2\n5\n0\n\n\n15\nrob\nred\n2\n6\n0\n\n\n16\nalice\ngreen\n2\n7\n1\n\n\n17\nalice\nyellow\n2\n8\n1\n\n\n18\nalice\nblue\n2\n9\n1\n\n\n19\nalice\nred\n2\n10\n1\n\n\n20\neve\nyellow\n2\n12\n2\n\n\n21\neve\ngreen\n2\n11\n2\n\n\n22\neve\nblue\n2\n13\n2\n\n\n23\neve\nred\n2\n14\n2\n\n\n\n\n\n\n\n[[[ 8 9]\n [ 6 7]\n [ 9 10]\n [ 7 8]]\n\n [[12 13]\n [10 11]\n [13 14]\n [11 12]]\n\n [[ 4 5]\n [ 2 3]\n [ 5 6]\n [ 3 4]]] [1 2 0]\n\n\n\nsource\n\n\ndf2np3d\n\n df2np3d (df, groupby, data_cols=None)\n\nTransforms a df (with the same number of rows per group in groupby) to a 3d ndarray\n\nuser = np.array([1,2]).repeat(4).reshape(-1,1)\nval = np.random.rand(8, 3)\ndata = np.concatenate([user, val], axis=-1)\ndf = pd.DataFrame(data, columns=['user', 'x1', 'x2', 'x3'])\ntest_eq(df2np3d(df, ['user'], ['x1', 'x2', 'x3']).shape, (2, 3, 4))\n\n\nsource\n\n\nadd_missing_value_cols\n\n add_missing_value_cols (df, cols=None, dtype=<class 'float'>,\n fill_value=None)\n\n\ndata = np.random.randn(10, 2)\nmask = data > .8\ndata[mask] = np.nan\ndf = pd.DataFrame(data, columns=['A', 'B'])\ndf = add_missing_value_cols(df, cols=None, dtype=float)\ntest_eq(df['A'].isnull().sum(), df['missing_A'].sum())\ntest_eq(df['B'].isnull().sum(), df['missing_B'].sum())\ndf\n\n\n\n\n\n\n\n\nA\nB\nmissing_A\nmissing_B\n\n\n\n\n0\n0.476712\n-0.880797\n0.0\n0.0\n\n\n1\nNaN\n-1.517210\n1.0\n0.0\n\n\n2\n-1.348997\n-0.878441\n0.0\n0.0\n\n\n3\nNaN\n0.290756\n1.0\n0.0\n\n\n4\n0.569218\n-1.415777\n0.0\n0.0\n\n\n5\n0.591641\n-2.133860\n0.0\n0.0\n\n\n6\nNaN\nNaN\n1.0\n1.0\n\n\n7\nNaN\n-0.119397\n1.0\n0.0\n\n\n8\n-0.727988\n0.057254\n0.0\n0.0\n\n\n9\n-0.631352\n-0.219028\n0.0\n0.0\n\n\n\n\n\n\n\n\nsource\n\n\nadd_missing_timestamps\n\n add_missing_timestamps (df, datetime_col=None, use_index=False,\n unique_id_cols=None, groupby=None,\n fill_value=nan, range_by_group=True,\n start_date=None, end_date=None, freq=None)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\npandas DataFrame\n\n\ndatetime_col\nNoneType\nNone\ncolumn that contains the datetime data (without duplicates within groups)\n\n\nuse_index\nbool\nFalse\nindicates if the index contains the datetime data\n\n\nunique_id_cols\nNoneType\nNone\ncolumn used to identify unique_ids\n\n\ngroupby\nNoneType\nNone\nsame as unique_id_cols. Will be deprecated. Kept for compatiblity.\n\n\nfill_value\nfloat\nnan\nvalues that will be insert where missing dates exist. 
Default:np.nan\n\n\nrange_by_group\nbool\nTrue\nif True, dates will be filled between min and max dates for each group. Otherwise, between the min and max dates in the df.\n\n\nstart_date\nNoneType\nNone\nstart date to fill in missing dates (same for all unique_ids)\n\n\nend_date\nNoneType\nNone\nend date to fill in missing dates (same for all unique_ids)\n\n\nfreq\nNoneType\nNone\nfrequency used to fill in the missing datetime\n\n\n\n\n# Filling dates between min and max dates\ndates = pd.date_range('2021-05-01', '2021-05-07').values\ndata = np.zeros((len(dates), 3))\ndata[:, 0] = dates\ndata[:, 1] = np.random.rand(len(dates))\ndata[:, 2] = np.random.rand(len(dates))\ncols = ['date', 'feature1', 'feature2']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})\ndate_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)\ndate_df_with_missing_dates\n\n\n\n\n\n\n\n\ndate\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01\n0.537248\n0.670897\n\n\n1\n2021-05-03\n0.299912\n0.421039\n\n\n2\n2021-05-05\n0.648372\n0.204641\n\n\n3\n2021-05-06\n0.017475\n0.022183\n\n\n4\n2021-05-07\n0.965919\n0.470055\n\n\n\n\n\n\n\n\n# No groups\nexpected_output_df = date_df.copy()\nexpected_output_df.loc[[1,3], ['feature1', 'feature2']] = np.nan\ndisplay(expected_output_df)\noutput_df = add_missing_timestamps(date_df_with_missing_dates.copy(), \n 'date', \n unique_id_cols=None, \n fill_value=np.nan, \n range_by_group=False)\ntest_eq(output_df, expected_output_df)\n\n\n\n\n\n\n\n\ndate\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01\n0.537248\n0.670897\n\n\n1\n2021-05-02\nNaN\nNaN\n\n\n2\n2021-05-03\n0.299912\n0.421039\n\n\n3\n2021-05-04\nNaN\nNaN\n\n\n4\n2021-05-05\n0.648372\n0.204641\n\n\n5\n2021-05-06\n0.017475\n0.022183\n\n\n6\n2021-05-07\n0.965919\n0.470055\n\n\n\n\n\n\n\n\n# Filling dates between min and max dates for each value in groupby column\ndates = pd.date_range('2021-05-01', '2021-05-07').values\ndates = np.concatenate((dates, dates))\ndata = np.zeros((len(dates), 4))\ndata[:, 0] = dates\ndata[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))\ndata[:, 2] = np.random.rand(len(dates))\ndata[:, 3] = np.random.rand(len(dates))\ncols = ['date', 'id', 'feature1', 'feature2']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})\ndate_df_with_missing_dates = date_df.drop([0,1,3,8,11,13]).reset_index(drop=True)\ndate_df_with_missing_dates\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-03\n0\n0.059398\n0.255853\n\n\n1\n2021-05-05\n0\n0.235536\n0.455261\n\n\n2\n2021-05-06\n0\n0.724423\n0.280910\n\n\n3\n2021-05-07\n0\n0.303682\n0.853959\n\n\n4\n2021-05-01\n1\n0.022424\n0.408510\n\n\n5\n2021-05-03\n1\n0.508190\n0.603880\n\n\n6\n2021-05-04\n1\n0.330924\n0.108156\n\n\n7\n2021-05-06\n1\n0.601481\n0.020182\n\n\n\n\n\n\n\n\n# groupby='id', range_by_group=True\nexpected_output_df = date_df.drop([0,1,13]).reset_index(drop=True) \nexpected_output_df.loc[[1,6,9], ['feature1', 'feature2']] = np.nan\ndisplay(expected_output_df)\noutput_df = add_missing_timestamps(date_df_with_missing_dates.copy(), \n 'date', \n unique_id_cols='id', \n fill_value=np.nan, \n range_by_group=True)\ntest_eq(expected_output_df, 
output_df)\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-03\n0\n0.059398\n0.255853\n\n\n1\n2021-05-04\n0\nNaN\nNaN\n\n\n2\n2021-05-05\n0\n0.235536\n0.455261\n\n\n3\n2021-05-06\n0\n0.724423\n0.280910\n\n\n4\n2021-05-07\n0\n0.303682\n0.853959\n\n\n5\n2021-05-01\n1\n0.022424\n0.408510\n\n\n6\n2021-05-02\n1\nNaN\nNaN\n\n\n7\n2021-05-03\n1\n0.508190\n0.603880\n\n\n8\n2021-05-04\n1\n0.330924\n0.108156\n\n\n9\n2021-05-05\n1\nNaN\nNaN\n\n\n10\n2021-05-06\n1\n0.601481\n0.020182\n\n\n\n\n\n\n\n\n# groupby='id', range_by_group=False\nexpected_output_df = date_df.copy() \nexpected_output_df.loc[[0,1,3,8,11,13], ['feature1', 'feature2']] = np.nan\ndisplay(expected_output_df)\noutput_df = add_missing_timestamps(date_df_with_missing_dates.copy(), \n 'date', \n unique_id_cols='id', \n fill_value=np.nan, \n range_by_group=False)\ntest_eq(expected_output_df, output_df)\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01\n0\nNaN\nNaN\n\n\n1\n2021-05-02\n0\nNaN\nNaN\n\n\n2\n2021-05-03\n0\n0.059398\n0.255853\n\n\n3\n2021-05-04\n0\nNaN\nNaN\n\n\n4\n2021-05-05\n0\n0.235536\n0.455261\n\n\n5\n2021-05-06\n0\n0.724423\n0.280910\n\n\n6\n2021-05-07\n0\n0.303682\n0.853959\n\n\n7\n2021-05-01\n1\n0.022424\n0.408510\n\n\n8\n2021-05-02\n1\nNaN\nNaN\n\n\n9\n2021-05-03\n1\n0.508190\n0.603880\n\n\n10\n2021-05-04\n1\n0.330924\n0.108156\n\n\n11\n2021-05-05\n1\nNaN\nNaN\n\n\n12\n2021-05-06\n1\n0.601481\n0.020182\n\n\n13\n2021-05-07\n1\nNaN\nNaN\n\n\n\n\n\n\n\n\n# Filling dates between min and max timestamps\ndates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values\ndata = np.zeros((len(dates), 3))\ndata[:, 0] = dates\ndata[:, 1] = np.random.rand(len(dates))\ndata[:, 2] = np.random.rand(len(dates))\ncols = ['date', 'feature1', 'feature2']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})\ndate_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)\ndate_df_with_missing_dates\n\n\n\n\n\n\n\n\ndate\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01 00:00:00\n0.774846\n0.624488\n\n\n1\n2021-05-01 08:00:00\n0.683837\n0.441230\n\n\n2\n2021-05-01 16:00:00\n0.142269\n0.279095\n\n\n3\n2021-05-01 20:00:00\n0.953686\n0.205123\n\n\n\n\n\n\n\n\n# No groups\nexpected_output_df = date_df.copy()\nexpected_output_df.loc[[1,3], ['feature1', 'feature2']] = np.nan\ndisplay(expected_output_df)\noutput_df = add_missing_timestamps(date_df_with_missing_dates.copy(), 'date', groupby=None, fill_value=np.nan, range_by_group=False, freq='4H')\ntest_eq(output_df, expected_output_df)\n\n\n\n\n\n\n\n\ndate\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01 00:00:00\n0.774846\n0.624488\n\n\n1\n2021-05-01 04:00:00\nNaN\nNaN\n\n\n2\n2021-05-01 08:00:00\n0.683837\n0.441230\n\n\n3\n2021-05-01 12:00:00\nNaN\nNaN\n\n\n4\n2021-05-01 16:00:00\n0.142269\n0.279095\n\n\n5\n2021-05-01 20:00:00\n0.953686\n0.205123\n\n\n\n\n\n\n\n\n# Filling missing values between min and max timestamps for each value in groupby column\n\ndates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values\ndates = np.concatenate((dates, dates))\ndata = np.zeros((len(dates), 4))\ndata[:, 0] = dates\ndata[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))\ndata[:, 2] = np.random.rand(len(dates))\ndata[:, 3] = np.random.rand(len(dates))\ncols = ['date', 'id', 'feature1', 'feature2']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})\ndate_df_with_missing_dates = 
date_df.drop([0,1,3,8,9,11]).reset_index(drop=True)\ndate_df_with_missing_dates\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01 08:00:00\n0\n0.438784\n0.084472\n\n\n1\n2021-05-01 16:00:00\n0\n0.059613\n0.445215\n\n\n2\n2021-05-01 20:00:00\n0\n0.511807\n0.001034\n\n\n3\n2021-05-01 00:00:00\n1\n0.970115\n0.280121\n\n\n4\n2021-05-01 04:00:00\n1\n0.775051\n0.436359\n\n\n5\n2021-05-01 16:00:00\n1\n0.469987\n0.457442\n\n\n\n\n\n\n\n\n# groupby='id', range_by_group=True\nexpected_output_df = date_df.drop([0,1,11]).reset_index(drop=True) \nexpected_output_df.loc[[1,6,7], ['feature1', 'feature2']] = np.nan\ndisplay(expected_output_df)\noutput_df = add_missing_timestamps(date_df_with_missing_dates.copy(),\n 'date', \n groupby='id', \n fill_value=np.nan, \n range_by_group=True, \n freq='4H')\ntest_eq(expected_output_df, output_df)\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01 08:00:00\n0\n0.438784\n0.084472\n\n\n1\n2021-05-01 12:00:00\n0\nNaN\nNaN\n\n\n2\n2021-05-01 16:00:00\n0\n0.059613\n0.445215\n\n\n3\n2021-05-01 20:00:00\n0\n0.511807\n0.001034\n\n\n4\n2021-05-01 00:00:00\n1\n0.970115\n0.280121\n\n\n5\n2021-05-01 04:00:00\n1\n0.775051\n0.436359\n\n\n6\n2021-05-01 08:00:00\n1\nNaN\nNaN\n\n\n7\n2021-05-01 12:00:00\n1\nNaN\nNaN\n\n\n8\n2021-05-01 16:00:00\n1\n0.469987\n0.457442\n\n\n\n\n\n\n\n\n# groupby='id', range_by_group=False\nexpected_output_df = date_df.copy() \nexpected_output_df.loc[[0,1,3,8,9,11], ['feature1', 'feature2']] = np.nan\ndisplay(expected_output_df)\noutput_df = add_missing_timestamps(date_df_with_missing_dates.copy(), \n 'date', \n groupby='id', \n fill_value=np.nan, \n range_by_group=False, \n freq='4H')\ntest_eq(expected_output_df, output_df)\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01 00:00:00\n0\nNaN\nNaN\n\n\n1\n2021-05-01 04:00:00\n0\nNaN\nNaN\n\n\n2\n2021-05-01 08:00:00\n0\n0.438784\n0.084472\n\n\n3\n2021-05-01 12:00:00\n0\nNaN\nNaN\n\n\n4\n2021-05-01 16:00:00\n0\n0.059613\n0.445215\n\n\n5\n2021-05-01 20:00:00\n0\n0.511807\n0.001034\n\n\n6\n2021-05-01 00:00:00\n1\n0.970115\n0.280121\n\n\n7\n2021-05-01 04:00:00\n1\n0.775051\n0.436359\n\n\n8\n2021-05-01 08:00:00\n1\nNaN\nNaN\n\n\n9\n2021-05-01 12:00:00\n1\nNaN\nNaN\n\n\n10\n2021-05-01 16:00:00\n1\n0.469987\n0.457442\n\n\n11\n2021-05-01 20:00:00\n1\nNaN\nNaN\n\n\n\n\n\n\n\n\n# No groups, with duplicate dates ==> FAILS\ndates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values\ndata = np.zeros((len(dates), 3))\ndata[:, 0] = dates\ndata[:, 1] = np.random.rand(len(dates))\ndata[:, 2] = np.random.rand(len(dates))\ncols = ['date', 'feature1', 'feature2']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})\ndate_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)\ndate_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']\ndisplay(date_df_with_missing_dates)\ntest_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby=None, fill_value=np.nan, range_by_group=False, freq='4H'), )\n\n\n\n\n\n\n\n\ndate\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01 00:00:00\n0.755092\n0.002068\n\n\n1\n2021-05-01 08:00:00\n0.570693\n0.087019\n\n\n2\n2021-05-01 16:00:00\n0.228869\n0.856618\n\n\n3\n2021-05-01 16:00:00\n0.349506\n0.428253\n\n\n\n\n\n\n\n\n# groupby='id', range_by_group=True, with duplicate dates ==> FAILS\n\ndates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values\ndates = 
np.concatenate((dates, dates))\ndata = np.zeros((len(dates), 4))\ndata[:, 0] = dates\ndata[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))\ndata[:, 2] = np.random.rand(len(dates))\ndata[:, 3] = np.random.rand(len(dates))\ncols = ['date', 'id', 'feature1', 'feature2']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})\ndate_df_with_missing_dates = date_df.drop([0,1,8,9,11]).reset_index(drop=True)\ndate_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']\ndisplay(date_df_with_missing_dates)\ntest_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby='id', fill_value=np.nan, range_by_group=True, freq='4H'), \n contains='cannot handle a non-unique multi-index!')\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01 08:00:00\n0\n0.040345\n0.312874\n\n\n1\n2021-05-01 12:00:00\n0\n0.713424\n0.597211\n\n\n2\n2021-05-01 16:00:00\n0\n0.468382\n0.652314\n\n\n3\n2021-05-01 16:00:00\n0\n0.396691\n0.605664\n\n\n4\n2021-05-01 00:00:00\n1\n0.804646\n0.964115\n\n\n5\n2021-05-01 04:00:00\n1\n0.089925\n0.072410\n\n\n6\n2021-05-01 16:00:00\n1\n0.830786\n0.560658\n\n\n\n\n\n\n\n\n# groupby='id', range_by_group=FALSE, with duplicate dates ==> FAILS\n\ndates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values\ndates = np.concatenate((dates, dates))\ndata = np.zeros((len(dates), 4))\ndata[:, 0] = dates\ndata[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))\ndata[:, 2] = np.random.rand(len(dates))\ndata[:, 3] = np.random.rand(len(dates))\ncols = ['date', 'id', 'feature1', 'feature2']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})\ndate_df_with_missing_dates = date_df.drop([0,1,8,9,11]).reset_index(drop=True)\ndate_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']\ndisplay(date_df_with_missing_dates)\ntest_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby='id', fill_value=np.nan, range_by_group=False, freq='4H'), \n contains='cannot handle a non-unique multi-index!')\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\n\n\n\n\n0\n2021-05-01 08:00:00\n0\n0.448508\n0.953596\n\n\n1\n2021-05-01 12:00:00\n0\n0.868802\n0.526845\n\n\n2\n2021-05-01 16:00:00\n0\n0.223070\n0.304842\n\n\n3\n2021-05-01 16:00:00\n0\n0.645661\n0.270956\n\n\n4\n2021-05-01 00:00:00\n1\n0.017250\n0.787757\n\n\n5\n2021-05-01 04:00:00\n1\n0.783341\n0.608269\n\n\n6\n2021-05-01 16:00:00\n1\n0.426247\n0.926149\n\n\n\n\n\n\n\n\nsource\n\n\ntime_encoding\n\n time_encoding (series, freq, max_val=None)\n\nTransforms a pandas series of dtype datetime64 (of any freq) or DatetimeIndex into 2 float arrays\nAvailable options: microsecond, millisecond, second, minute, hour, day = day_of_month = dayofmonth, day_of_week = weekday = dayofweek, day_of_year = dayofyear, week = week_of_year = weekofyear, month and year\n\nfor freq in ['microsecond', 'second', 'minute', 'hour', 'day', 'dayofweek', 'dayofyear', 'month']:\n tdf = pd.DataFrame(pd.date_range('2021-03-01', dt.datetime.today()), columns=['date'])\n a,b = time_encoding(tdf.date, freq=freq)\n plt.plot(a)\n plt.plot(b)\n plt.title(freq)\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfor freq in ['microsecond', 'second', 'minute', 'hour', 'day', 'dayofweek', 'dayofyear', 'month']:\n 
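# Aside (hedged sketch, not from the original notebook): the values returned by time_encoding
# are consistent with a standard cyclical encoding, i.e. sin(2*pi*v/period) and cos(2*pi*v/period)
# of the selected calendar component. For example, 2021-05-01 is a Saturday (dayofweek=5), and
# sin(2*pi*5/7) ~ -0.9749, cos(2*pi*5/7) ~ -0.2225, which matches the dow_sin / dow_cos values
# shown in the table further below. The period used here (7 for 'dayofweek') is inferred from
# those values, not taken from the library source.
dow = pd.Series(pd.to_datetime(['2021-05-01'])).dt.dayofweek.values   # array([5])
np.sin(2 * np.pi * dow / 7), np.cos(2 * np.pi * dow / 7)              # ~ (-0.9749, -0.2225)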
dateindex = pd.date_range('2021-03-01', dt.datetime.today())\n a,b = time_encoding(dateindex, freq=freq)\n plt.plot(a)\n plt.plot(b)\n plt.title(freq)\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndow_sin, dow_cos = time_encoding(date_df['date'], 'dayofweek')\nplt.plot(dow_sin)\nplt.plot(dow_cos)\nplt.title('DayOfWeek')\nplt.show()\ndate_df['dow_sin'] = dow_sin\ndate_df['dow_cos'] = dow_cos\ndate_df\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature2\ndow_sin\ndow_cos\n\n\n\n\n0\n2021-05-01 00:00:00\n0\n0.773597\n0.465634\n-0.974928\n-0.222521\n\n\n1\n2021-05-01 04:00:00\n0\n0.265526\n0.963753\n-0.974928\n-0.222521\n\n\n2\n2021-05-01 08:00:00\n0\n0.448508\n0.953596\n-0.974928\n-0.222521\n\n\n3\n2021-05-01 12:00:00\n0\n0.868802\n0.526845\n-0.974928\n-0.222521\n\n\n4\n2021-05-01 16:00:00\n0\n0.223070\n0.304842\n-0.974928\n-0.222521\n\n\n5\n2021-05-01 20:00:00\n0\n0.645661\n0.270956\n-0.974928\n-0.222521\n\n\n6\n2021-05-01 00:00:00\n1\n0.017250\n0.787757\n-0.974928\n-0.222521\n\n\n7\n2021-05-01 04:00:00\n1\n0.783341\n0.608269\n-0.974928\n-0.222521\n\n\n8\n2021-05-01 08:00:00\n1\n0.629875\n0.170726\n-0.974928\n-0.222521\n\n\n9\n2021-05-01 12:00:00\n1\n0.302927\n0.682136\n-0.974928\n-0.222521\n\n\n10\n2021-05-01 16:00:00\n1\n0.426247\n0.926149\n-0.974928\n-0.222521\n\n\n11\n2021-05-01 20:00:00\n1\n0.830624\n0.543715\n-0.974928\n-0.222521\n\n\n\n\n\n\n\n\nsource\n\n\nget_gaps\n\n get_gaps (o:torch.Tensor, forward:bool=True, backward:bool=True,\n nearest:bool=True, normalize:bool=True)\n\nNumber of sequence steps from previous, to next and/or to nearest real value along the last dimension of 3D arrays or tensors\n\nsource\n\n\nnearest_gaps\n\n nearest_gaps (o, normalize=True)\n\nNumber of sequence steps to nearest real value along the last dimension of 3D arrays or tensors\n\nsource\n\n\nbackward_gaps\n\n backward_gaps (o, normalize=True)\n\nNumber of sequence steps to next real value along the last dimension of 3D arrays or tensors\n\nsource\n\n\nforward_gaps\n\n forward_gaps (o, normalize=True)\n\nNumber of sequence steps since previous real value along the last dimension of 3D arrays or tensors\n\nt = torch.rand(1, 2, 8)\narr = t.numpy()\nt[t <.6] = np.nan\ntest_ge(nearest_gaps(t).min().item(), 0)\ntest_ge(nearest_gaps(arr).min(), 0)\ntest_le(nearest_gaps(t).min().item(), 1)\ntest_le(nearest_gaps(arr).min(), 1)\ntest_eq(torch.isnan(forward_gaps(t)).sum(), 0)\ntest_eq(np.isnan(forward_gaps(arr)).sum(), 0)\nag = get_gaps(t)\ntest_eq(ag.shape, (1,6,8))\ntest_eq(torch.isnan(ag).sum(), 0)\n\n\nsource\n\n\nadd_delta_timestamp_cols\n\n add_delta_timestamp_cols (df, cols=None, groupby=None, forward=True,\n backward=True, nearest=True, normalize=True)\n\n\n# Add delta timestamp features for the no groups setting\ndates = pd.date_range('2021-05-01', '2021-05-07').values\ndata = np.zeros((len(dates), 2))\ndata[:, 0] = dates\ndata[:, 1] = np.random.rand(len(dates))\n\ncols = ['date', 'feature1']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float})\ndate_df.loc[[1,3,4],'feature1'] = np.nan\ndate_df\n\n\n\n\n\n\n\n\ndate\nfeature1\n\n\n\n\n0\n2021-05-01\n0.132532\n\n\n1\n2021-05-02\nNaN\n\n\n2\n2021-05-03\n0.403176\n\n\n3\n2021-05-04\nNaN\n\n\n4\n2021-05-05\nNaN\n\n\n5\n2021-05-06\n0.179554\n\n\n6\n2021-05-07\n0.446536\n\n\n\n\n\n\n\n\n# No groups\nexpected_output_df = date_df.copy()\nexpected_output_df['feature1_dt_fwd'] = 
np.array([1,1,2,1,2,3,1])\nexpected_output_df['feature1_dt_bwd'] = np.array([2,1,3,2,1,1,1])\nexpected_output_df['feature1_dt_nearest'] = np.array([1,1,2,1,1,1,1])\n\ndisplay(expected_output_df)\noutput_df = add_delta_timestamp_cols(date_df, cols='feature1', normalize=False)\ntest_eq(expected_output_df, output_df)\n\n\n\n\n\n\n\n\ndate\nfeature1\nfeature1_dt_fwd\nfeature1_dt_bwd\nfeature1_dt_nearest\n\n\n\n\n0\n2021-05-01\n0.132532\n1\n2\n1\n\n\n1\n2021-05-02\nNaN\n1\n1\n1\n\n\n2\n2021-05-03\n0.403176\n2\n3\n2\n\n\n3\n2021-05-04\nNaN\n1\n2\n1\n\n\n4\n2021-05-05\nNaN\n2\n1\n1\n\n\n5\n2021-05-06\n0.179554\n3\n1\n1\n\n\n6\n2021-05-07\n0.446536\n1\n1\n1\n\n\n\n\n\n\n\n\n# Add delta timestamp features within a group\ndates = pd.date_range('2021-05-01', '2021-05-07').values\ndates = np.concatenate((dates, dates))\ndata = np.zeros((len(dates), 3))\ndata[:, 0] = dates\ndata[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))\ndata[:, 2] = np.random.rand(len(dates))\n\ncols = ['date', 'id', 'feature1']\ndate_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float})\ndate_df.loc[[1,3,4,8,9,11],'feature1'] = np.nan\ndate_df\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\n\n\n\n\n0\n2021-05-01\n0\n0.405327\n\n\n1\n2021-05-02\n0\nNaN\n\n\n2\n2021-05-03\n0\n0.055934\n\n\n3\n2021-05-04\n0\nNaN\n\n\n4\n2021-05-05\n0\nNaN\n\n\n5\n2021-05-06\n0\n0.698408\n\n\n6\n2021-05-07\n0\n0.064831\n\n\n7\n2021-05-01\n1\n0.407541\n\n\n8\n2021-05-02\n1\nNaN\n\n\n9\n2021-05-03\n1\nNaN\n\n\n10\n2021-05-04\n1\n0.113590\n\n\n11\n2021-05-05\n1\nNaN\n\n\n12\n2021-05-06\n1\n0.548088\n\n\n13\n2021-05-07\n1\n0.348813\n\n\n\n\n\n\n\n\n# groupby='id'\nexpected_output_df = date_df.copy()\nexpected_output_df['feature1_dt_fwd'] = np.array([1,1,2,1,2,3,1,1,1,2,3,1,2,1])\nexpected_output_df['feature1_dt_bwd'] = np.array([2,1,3,2,1,1,1,3,2,1,2,1,1,1])\nexpected_output_df['feature1_dt_nearest'] = np.array([1,1,2,1,1,1,1,1,1,1,2,1,1,1])\n\ndisplay(expected_output_df)\noutput_df = add_delta_timestamp_cols(date_df, cols='feature1', groupby='id', normalize=False)\ntest_eq(expected_output_df, output_df)\n\n\n\n\n\n\n\n\ndate\nid\nfeature1\nfeature1_dt_fwd\nfeature1_dt_bwd\nfeature1_dt_nearest\n\n\n\n\n0\n2021-05-01\n0\n0.405327\n1\n2\n1\n\n\n1\n2021-05-02\n0\nNaN\n1\n1\n1\n\n\n2\n2021-05-03\n0\n0.055934\n2\n3\n2\n\n\n3\n2021-05-04\n0\nNaN\n1\n2\n1\n\n\n4\n2021-05-05\n0\nNaN\n2\n1\n1\n\n\n5\n2021-05-06\n0\n0.698408\n3\n1\n1\n\n\n6\n2021-05-07\n0\n0.064831\n1\n1\n1\n\n\n7\n2021-05-01\n1\n0.407541\n1\n3\n1\n\n\n8\n2021-05-02\n1\nNaN\n1\n2\n1\n\n\n9\n2021-05-03\n1\nNaN\n2\n1\n1\n\n\n10\n2021-05-04\n1\n0.113590\n3\n2\n2\n\n\n11\n2021-05-05\n1\nNaN\n1\n1\n1\n\n\n12\n2021-05-06\n1\n0.548088\n2\n1\n1\n\n\n13\n2021-05-07\n1\n0.348813\n1\n1\n1\n\n\n\n\n\n\n\nSlidingWindow and SlidingWindowPanel are 2 useful functions that will allow you to create an array with segments of a pandas dataframe based on multiple criteria.\n\nsource\n\n\nSlidingWindow\n\n SlidingWindow (window_len:int, stride:Optional[int]=1, start:int=0,\n pad_remainder:bool=False, padding:str='post',\n padding_value:float=nan, add_padding_feature:bool=True,\n get_x:Union[NoneType,int,list]=None,\n get_y:Union[NoneType,int,list]=None,\n y_func:Optional[<built-infunctioncallable>]=None,\n output_processor:Optional[<built-\n infunctioncallable>]=None, copy:bool=False,\n horizon:Union[int,list]=1, seq_first:bool=True,\n sort_by:Optional[list]=None, ascending:bool=True,\n check_leakage:bool=True)\n\nApplies a sliding window to a 1d or 2d input 
(np.ndarray, torch.Tensor or pd.DataFrame)\nInput:\n You can use np.ndarray, pd.DataFrame or torch.Tensor as input\n\n shape: (seq_len, ) or (seq_len, n_vars) if seq_first=True else (n_vars, seq_len)\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nwindow_len\nint\n\nlength of lookback window\n\n\nstride\nUnion[None, int]\n1\nn datapoints the window is moved ahead along the sequence. Default: 1. If None, stride=window_len (no overlap)\n\n\nstart\nint\n0\ndetermines the step where the first window is applied: 0 (default) or a given step (int). Previous steps will be discarded.\n\n\npad_remainder\nbool\nFalse\nallows padding of remainder subsequences when the sliding window is applied and get_y == [] (unlabeled data).\n\n\npadding\nstr\npost\n‘pre’ or ‘post’ (optional, defaults to ‘post’): pad either before or after each sequence. If pad_remainder == False, it indicates the starting point to create the sequence (‘pre’ from the end, and ‘post’ from the beginning)\n\n\npadding_value\nfloat\nnan\nvalue (float) that will be used for padding. Default: np.nan\n\n\nadd_padding_feature\nbool\nTrue\nadd an additional feature indicating whether each timestep is padded (1) or not (0).\n\n\nget_x\nUnion[None, int, list]\nNone\nindices of columns that contain the independent variable (xs). If None, all data will be used as x.\n\n\nget_y\nUnion[None, int, list]\nNone\nindices of columns that contain the target (ys). If None, all data will be used as y. [] means no y data is created (unlabeled data).\n\n\ny_func\nOptional[callable]\nNone\noptional function to calculate the ys based on the get_y col/s and each y sub-window. y_func must be a function applied to axis=1!\n\n\noutput_processor\nOptional[callable]\nNone\noptional function to process the final output (X (and y if available)). This is useful when some values need to be removed. The function should take X and y (even if it’s None) as arguments.\n\n\ncopy\nbool\nFalse\ncopy the original object to avoid changes in it.\n\n\nhorizon\nUnion[int, list]\n1\nnumber of future datapoints to predict (y). 
If get_y is [] horizon will be set to 0.\n\n\nseq_first\nbool\nTrue\nTrue if input shape (seq_len, n_vars), False if input shape (n_vars, seq_len)\n\n\nsort_by\nOptional[list]\nNone\ncolumn/s used for sorting the array in ascending order\n\n\nascending\nbool\nTrue\nused in sorting\n\n\ncheck_leakage\nbool\nTrue\nchecks if there’s leakage in the output between X and y\n\n\n\n\nwl = 5\nstride = 5\n\nt = np.repeat(np.arange(13).reshape(-1,1), 3, axis=-1)\nprint('input shape:', t.shape)\nX, y = SlidingWindow(wl, stride=stride, pad_remainder=True, get_y=[])(t)\nX\n\ninput shape: (13, 3)\n\n\narray([[[ 0., 1., 2., 3., 4.],\n [ 0., 1., 2., 3., 4.],\n [ 0., 1., 2., 3., 4.],\n [ 0., 0., 0., 0., 0.]],\n\n [[ 5., 6., 7., 8., 9.],\n [ 5., 6., 7., 8., 9.],\n [ 5., 6., 7., 8., 9.],\n [ 0., 0., 0., 0., 0.]],\n\n [[10., 11., 12., nan, nan],\n [10., 11., 12., nan, nan],\n [10., 11., 12., nan, nan],\n [ 0., 0., 0., 1., 1.]]])\n\n\n\nwl = 5\nt = np.arange(10)\nprint('input shape:', t.shape)\nX, y = SlidingWindow(wl)(t)\ntest_eq(X.shape[1:], (1, wl))\nitemify(X,)\n\ninput shape: (10,)\n\n\n(#5) [(array([[0, 1, 2, 3, 4]]),),(array([[1, 2, 3, 4, 5]]),),(array([[2, 3, 4, 5, 6]]),),(array([[3, 4, 5, 6, 7]]),),(array([[4, 5, 6, 7, 8]]),)]\n\n\n\nwl = 5\nh = 1\n\nt = np.arange(10)\nprint('input shape:', t.shape)\nX, y = SlidingWindow(wl, stride=1, horizon=h)(t)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\ntest_eq(items[0][1].shape, ())\n\ninput shape: (10,)\n[(array([[0, 1, 2, 3, 4]]), 5), (array([[1, 2, 3, 4, 5]]), 6), (array([[2, 3, 4, 5, 6]]), 7), (array([[3, 4, 5, 6, 7]]), 8), (array([[4, 5, 6, 7, 8]]), 9)]\n\n\n\nwl = 5\nh = 2 # 2 or more\n\nt = np.arange(10)\nprint('input shape:', t.shape)\nX, y = SlidingWindow(wl, horizon=h)(t)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\ntest_eq(items[0][1].shape, (2, ))\n\ninput shape: (10,)\n[(array([[0, 1, 2, 3, 4]]), array([5, 6])), (array([[1, 2, 3, 4, 5]]), array([6, 7])), (array([[2, 3, 4, 5, 6]]), array([7, 8])), (array([[3, 4, 5, 6, 7]]), array([8, 9]))]\n\n\n\nwl = 5\nh = 2 # 2 or more\n\nt = np.arange(10).reshape(1, -1)\nprint('input shape:', t.shape)\nX, y = SlidingWindow(wl, stride=1, horizon=h, get_y=None, seq_first=False)(t)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\ntest_eq(items[0][1].shape, (2, ))\n\ninput shape: (1, 10)\n[(array([[0, 1, 2, 3, 4]]), array([5, 6])), (array([[1, 2, 3, 4, 5]]), array([6, 7])), (array([[2, 3, 4, 5, 6]]), array([7, 8])), (array([[3, 4, 5, 6, 7]]), array([8, 9]))]\n\n\n\nwl = 5\nh = 2 # 2 or more\n\nt = np.arange(10).reshape(1, -1)\nprint('input shape:', t.shape)\nX, y = SlidingWindow(wl, stride=1, horizon=h, seq_first=False)(t)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\n\ninput shape: (1, 10)\n[(array([[0, 1, 2, 3, 4]]), array([5, 6])), (array([[1, 2, 3, 4, 5]]), array([6, 7])), (array([[2, 3, 4, 5, 6]]), array([7, 8])), (array([[3, 4, 5, 6, 7]]), array([8, 9]))]\n\n\n\nwl = 5\n\nt = np.arange(10).reshape(1, -1)\nprint('input shape:', t.shape)\nX, y = SlidingWindow(wl, stride=3, horizon=1, get_y=None, seq_first=False)(t)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\ntest_eq(items[0][1].shape, ())\n\ninput shape: (1, 10)\n[(array([[0, 1, 2, 3, 4]]), 5), (array([[3, 4, 5, 6, 7]]), 8)]\n\n\n\nwl = 5\nstart = 3\n\nt = np.arange(20)\nprint('input shape:', t.shape)\nX, y = SlidingWindow(wl, stride=None, horizon=1, start=start)(t)\nitems = itemify(X, 
y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\ntest_eq(items[0][1].shape, ())\n\ninput shape: (20,)\n[(array([[3, 4, 5, 6, 7]]), 8), (array([[ 8, 9, 10, 11, 12]]), 13), (array([[13, 14, 15, 16, 17]]), 18)]\n\n\n\nwl = 5\n\nt = np.arange(20)\nprint('input shape:', t.shape)\ndf = pd.DataFrame(t, columns=['var'])\ndisplay(df)\nX, y = SlidingWindow(wl, stride=None, horizon=1, get_y=None)(df)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\ntest_eq(items[0][1].shape, ())\n\ninput shape: (20,)\n[(array([[0, 1, 2, 3, 4]]), 5), (array([[5, 6, 7, 8, 9]]), 10), (array([[10, 11, 12, 13, 14]]), 15)]\n\n\n\n\n\n\n\n\n\nvar\n\n\n\n\n0\n0\n\n\n1\n1\n\n\n2\n2\n\n\n3\n3\n\n\n4\n4\n\n\n5\n5\n\n\n6\n6\n\n\n7\n7\n\n\n8\n8\n\n\n9\n9\n\n\n10\n10\n\n\n11\n11\n\n\n12\n12\n\n\n13\n13\n\n\n14\n14\n\n\n15\n15\n\n\n16\n16\n\n\n17\n17\n\n\n18\n18\n\n\n19\n19\n\n\n\n\n\n\n\n\nwl = 5\n\nt = np.arange(20)\nprint('input shape:', t.shape)\ndf = pd.DataFrame(t, columns=['var'])\ndisplay(df)\nX, y = SlidingWindow(wl, stride=1, horizon=1, get_y=None)(df)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\ntest_eq(items[0][1].shape, ())\n\ninput shape: (20,)\n[(array([[0, 1, 2, 3, 4]]), 5), (array([[1, 2, 3, 4, 5]]), 6), (array([[2, 3, 4, 5, 6]]), 7), (array([[3, 4, 5, 6, 7]]), 8), (array([[4, 5, 6, 7, 8]]), 9), (array([[5, 6, 7, 8, 9]]), 10), (array([[ 6, 7, 8, 9, 10]]), 11), (array([[ 7, 8, 9, 10, 11]]), 12), (array([[ 8, 9, 10, 11, 12]]), 13), (array([[ 9, 10, 11, 12, 13]]), 14), (array([[10, 11, 12, 13, 14]]), 15), (array([[11, 12, 13, 14, 15]]), 16), (array([[12, 13, 14, 15, 16]]), 17), (array([[13, 14, 15, 16, 17]]), 18), (array([[14, 15, 16, 17, 18]]), 19)]\n\n\n\n\n\n\n\n\n\nvar\n\n\n\n\n0\n0\n\n\n1\n1\n\n\n2\n2\n\n\n3\n3\n\n\n4\n4\n\n\n5\n5\n\n\n6\n6\n\n\n7\n7\n\n\n8\n8\n\n\n9\n9\n\n\n10\n10\n\n\n11\n11\n\n\n12\n12\n\n\n13\n13\n\n\n14\n14\n\n\n15\n15\n\n\n16\n16\n\n\n17\n17\n\n\n18\n18\n\n\n19\n19\n\n\n\n\n\n\n\n\nwl = 5\n\nt = np.arange(20)\nprint('input shape:', t.shape)\ndf = pd.DataFrame(t, columns=['var']).T\ndisplay(df)\nX, y = SlidingWindow(wl, stride=None, horizon=1, get_y=None, seq_first=False)(df)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (1, wl))\ntest_eq(items[0][1].shape, ())\n\ninput shape: (20,)\n[(array([[0, 1, 2, 3, 4]]), 5), (array([[5, 6, 7, 8, 9]]), 10), (array([[10, 11, 12, 13, 14]]), 15)]\n\n\n\n\n\n\n\n\n\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n\n\n\n\nvar\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n\n\n\n\n\n\n\n\nwl = 5\nn_vars = 3\n\nt = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))\nprint('input shape:', t.shape)\ndf = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])\ndisplay(df)\nX, y = SlidingWindow(wl, horizon=1)(df)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (n_vars, wl))\n\ninput shape: torch.Size([10, 3])\n[(array([[ 0, 1, 2, 3, 4],\n [ 0, 10, 20, 30, 40],\n [ 0, 100, 200, 300, 400]]), array([ 5, 50, 500])), (array([[ 1, 2, 3, 4, 5],\n [ 10, 20, 30, 40, 50],\n [100, 200, 300, 400, 500]]), array([ 6, 60, 600])), (array([[ 2, 3, 4, 5, 6],\n [ 20, 30, 40, 50, 60],\n [200, 300, 400, 500, 600]]), array([ 7, 70, 700])), (array([[ 3, 4, 5, 6, 7],\n [ 30, 40, 50, 60, 70],\n [300, 400, 500, 600, 700]]), array([ 8, 80, 800])), (array([[ 4, 5, 6, 7, 8],\n [ 40, 50, 60, 70, 80],\n [400, 500, 600, 700, 800]]), array([ 9, 90, 
900]))]\n\n\n\n\n\n\n\n\n\nvar_0\nvar_1\nvar_2\n\n\n\n\n0\n0\n0\n0\n\n\n1\n1\n10\n100\n\n\n2\n2\n20\n200\n\n\n3\n3\n30\n300\n\n\n4\n4\n40\n400\n\n\n5\n5\n50\n500\n\n\n6\n6\n60\n600\n\n\n7\n7\n70\n700\n\n\n8\n8\n80\n800\n\n\n9\n9\n90\n900\n\n\n\n\n\n\n\n\nwl = 5\nn_vars = 3\n\nt = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))\nprint('input shape:', t.shape)\ndf = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])\ndisplay(df)\nX, y = SlidingWindow(wl, horizon=1, get_y=\"var_0\")(df)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (n_vars, wl))\n\ninput shape: torch.Size([10, 3])\n[(array([[ 0, 1, 2, 3, 4],\n [ 0, 10, 20, 30, 40],\n [ 0, 100, 200, 300, 400]]), 5), (array([[ 1, 2, 3, 4, 5],\n [ 10, 20, 30, 40, 50],\n [100, 200, 300, 400, 500]]), 6), (array([[ 2, 3, 4, 5, 6],\n [ 20, 30, 40, 50, 60],\n [200, 300, 400, 500, 600]]), 7), (array([[ 3, 4, 5, 6, 7],\n [ 30, 40, 50, 60, 70],\n [300, 400, 500, 600, 700]]), 8), (array([[ 4, 5, 6, 7, 8],\n [ 40, 50, 60, 70, 80],\n [400, 500, 600, 700, 800]]), 9)]\n\n\n\n\n\n\n\n\n\nvar_0\nvar_1\nvar_2\n\n\n\n\n0\n0\n0\n0\n\n\n1\n1\n10\n100\n\n\n2\n2\n20\n200\n\n\n3\n3\n30\n300\n\n\n4\n4\n40\n400\n\n\n5\n5\n50\n500\n\n\n6\n6\n60\n600\n\n\n7\n7\n70\n700\n\n\n8\n8\n80\n800\n\n\n9\n9\n90\n900\n\n\n\n\n\n\n\n\nwl = 5\nn_vars = 3\n\nt = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))\nprint('input shape:', t.shape)\ncolumns=[f'var_{i}' for i in range(n_vars-1)]+['target']\ndf = pd.DataFrame(t, columns=columns)\ndisplay(df)\nX, y = SlidingWindow(wl, horizon=1, get_x=columns[:-1], get_y='target')(df)\nitems = itemify(X, y)\nprint(items)\ntest_eq(items[0][0].shape, (n_vars-1, wl))\ntest_eq(items[0][1].shape, ())\n\ninput shape: torch.Size([10, 3])\n[(array([[ 0, 1, 2, 3, 4],\n [ 0, 10, 20, 30, 40]]), 500), (array([[ 1, 2, 3, 4, 5],\n [10, 20, 30, 40, 50]]), 600), (array([[ 2, 3, 4, 5, 6],\n [20, 30, 40, 50, 60]]), 700), (array([[ 3, 4, 5, 6, 7],\n [30, 40, 50, 60, 70]]), 800), (array([[ 4, 5, 6, 7, 8],\n [40, 50, 60, 70, 80]]), 900)]\n\n\n\n\n\n\n\n\n\nvar_0\nvar_1\ntarget\n\n\n\n\n0\n0\n0\n0\n\n\n1\n1\n10\n100\n\n\n2\n2\n20\n200\n\n\n3\n3\n30\n300\n\n\n4\n4\n40\n400\n\n\n5\n5\n50\n500\n\n\n6\n6\n60\n600\n\n\n7\n7\n70\n700\n\n\n8\n8\n80\n800\n\n\n9\n9\n90\n900\n\n\n\n\n\n\n\n\nn_vars = 3\n\nt = (np.random.rand(1000, n_vars) - .5).cumsum(0)\nprint(t.shape)\nplt.plot(t)\nplt.show()\nX, y = SlidingWindow(5, stride=None, horizon=0, get_x=[0,1], get_y=2)(t)\ntest_eq(X[0].shape, (n_vars-1, wl))\ntest_eq(y[0].shape, ())\nprint(X.shape, y.shape)\n\n(1000, 3)\n(200, 2, 5) (200,)\n\n\n\n\n\n\n\n\n\n\nwl = 5\nn_vars = 3\n\nt = (np.random.rand(100, n_vars) - .5).cumsum(0)\nprint(t.shape)\ncolumns=[f'var_{i}' for i in range(n_vars-1)]+['target']\ndf = pd.DataFrame(t, columns=columns)\ndisplay(df)\nX, y = SlidingWindow(5, horizon=0, get_x=columns[:-1], get_y='target')(df)\ntest_eq(X[0].shape, (n_vars-1, wl))\ntest_eq(y[0].shape, ())\nprint(X.shape, y.shape)\n\n(100, 3)\n(96, 2, 5) (96,)\n\n\n\n\n\n\n\n\n\nvar_0\nvar_1\ntarget\n\n\n\n\n0\n0.154072\n0.197194\n-0.083179\n\n\n1\n0.402744\n-0.248788\n-0.560573\n\n\n2\n0.448209\n0.224215\n-0.681264\n\n\n3\n0.631502\n0.406760\n-1.162043\n\n\n4\n1.099973\n0.179926\n-0.712690\n\n\n...\n...\n...\n...\n\n\n95\n-0.405079\n3.662311\n-2.779159\n\n\n96\n-0.445625\n3.488809\n-2.663381\n\n\n97\n-0.187349\n3.304898\n-2.695971\n\n\n98\n-0.100652\n3.505663\n-2.590652\n\n\n99\n0.371272\n3.279901\n-2.764369\n\n\n\n\n100 rows × 3 columns\n\n\n\n\nseq_len = 100\nn_vars = 5\nt = 
(np.random.rand(seq_len, n_vars) - .5).cumsum(0)\nprint(t.shape)\ncolumns=[f'var_{i}' for i in range(n_vars-1)]+['target']\ndf = pd.DataFrame(t, columns=columns)\ndisplay(df)\nX, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=True)(df)\ntest_eq(X[0].shape, (n_vars-1, wl))\ntest_eq(y[0].shape, ())\nprint(X.shape, y.shape)\n\n(100, 5)\n(96, 4, 5) (96,)\n\n\n\n\n\n\n\n\n\nvar_0\nvar_1\nvar_2\nvar_3\ntarget\n\n\n\n\n0\n0.443639\n-0.288128\n-0.049732\n0.288915\n0.325872\n\n\n1\n-0.047608\n-0.009738\n0.056768\n0.541395\n0.017496\n\n\n2\n-0.243972\n0.102227\n0.361387\n0.628397\n0.049012\n\n\n3\n-0.721266\n0.045104\n0.724062\n0.940693\n0.510875\n\n\n4\n-0.641269\n0.141927\n0.793837\n1.158903\n0.417040\n\n\n...\n...\n...\n...\n...\n...\n\n\n95\n3.488117\n2.345512\n0.745483\n0.258568\n2.468550\n\n\n96\n3.187006\n1.945844\n0.833228\n0.511198\n2.115330\n\n\n97\n3.019862\n1.739802\n0.488732\n0.881324\n2.387837\n\n\n98\n3.314247\n1.992000\n0.119230\n0.797794\n2.327720\n\n\n99\n3.394578\n2.012458\n0.003244\n0.387125\n2.345970\n\n\n\n\n100 rows × 5 columns\n\n\n\n\nseq_len = 100\nn_vars = 5\n\nt = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)\nprint(t.shape)\ncolumns=[f'var_{i}' for i in range(n_vars-1)] + ['target']\ndf = pd.DataFrame(t, columns=columns).T\ndisplay(df)\nX, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=False)(df)\ntest_eq(X[0].shape, (n_vars-1, wl))\ntest_eq(y[0].shape, ())\nprint(X.shape, y.shape)\n\n(100, 5)\n(96, 4, 5) (96,)\n\n\n\n\n\n\n\n\n\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n...\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n\n\n\n\nvar_0\n-0.407162\n-0.742169\n-1.193053\n-1.058644\n-0.721243\n-1.056788\n-1.316226\n-1.247859\n-1.391482\n-1.258618\n...\n-2.847911\n-3.118643\n-3.444248\n-3.036050\n-2.664068\n-2.473782\n-2.508080\n-2.878210\n-2.841170\n-2.688932\n\n\nvar_1\n0.111643\n-0.286318\n-0.221917\n-0.026094\n-0.332200\n-0.376518\n-0.144763\n0.225361\n0.487134\n0.435856\n...\n1.569158\n1.294548\n1.564455\n1.501243\n1.490928\n1.450602\n1.440730\n1.755607\n1.380986\n1.236284\n\n\nvar_2\n-0.126951\n-0.484267\n-0.480375\n-0.706987\n-0.571379\n-0.561959\n-0.717696\n-0.586035\n-0.298053\n-0.047405\n...\n-1.748096\n-1.508691\n-1.158258\n-1.116485\n-1.153738\n-1.575450\n-1.875091\n-1.613255\n-1.274859\n-1.592096\n\n\nvar_3\n-0.462238\n-0.748774\n-0.625473\n-0.360442\n-0.789178\n-0.530832\n-0.785290\n-0.413452\n0.083685\n-0.110964\n...\n-4.873450\n-4.382297\n-4.531454\n-4.087051\n-4.087801\n-4.391084\n-4.262526\n-4.650170\n-4.465874\n-4.535273\n\n\ntarget\n0.241454\n0.084139\n-0.012974\n0.096328\n0.501035\n0.697043\n0.229185\n0.497430\n0.552922\n0.218345\n...\n-4.582426\n-4.194067\n-3.785398\n-3.808516\n-3.629740\n-3.398645\n-3.828007\n-3.600028\n-3.614195\n-3.592783\n\n\n\n\n5 rows × 100 columns\n\n\n\n\nseq_len = 100\nn_vars = 5\nt = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)\nprint(t.shape)\ncolumns=[f'var_{i}' for i in range(n_vars-1)] + ['target']\ndf = pd.DataFrame(t, columns=columns).T\ndisplay(df)\nX, y = SlidingWindow(5, stride=None, horizon=0, get_x=columns[:-1], get_y='target', seq_first=False)(df)\ntest_eq(X[0].shape, (n_vars-1, wl))\ntest_eq(y[0].shape, ())\nprint(X.shape, y.shape)\n\n(100, 5)\n(20, 4, 5) 
(20,)\n\n\n\n\n\n\n\n\n\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n...\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n\n\n\n\nvar_0\n0.210943\n-0.264863\n-0.307942\n0.176782\n-0.188244\n0.118824\n0.593353\n0.611408\n0.176396\n0.566034\n...\n-4.738294\n-5.138743\n-5.203979\n-4.835758\n-4.534974\n-4.310112\n-4.366365\n-4.328250\n-4.527717\n-4.432726\n\n\nvar_1\n-0.086375\n-0.457413\n0.025571\n0.428256\n0.611573\n0.319714\n-0.085129\n0.161735\n0.052730\n-0.356617\n...\n7.203539\n7.300534\n7.267954\n6.838923\n7.054134\n6.612532\n7.108269\n6.966000\n7.407915\n7.332567\n\n\nvar_2\n0.166139\n-0.231839\n-0.468804\n-0.565628\n-0.500941\n-0.706951\n-0.881385\n-1.138549\n-0.978276\n-0.952727\n...\n0.391942\n0.802356\n0.395688\n0.033288\n0.147283\n0.589911\n0.360847\n0.322019\n0.478120\n0.278228\n\n\nvar_3\n-0.234297\n-0.467480\n-0.925036\n-0.572783\n-0.345585\n0.149537\n-0.078098\n-0.577732\n-0.771975\n-0.322283\n...\n-1.487032\n-1.971348\n-2.300616\n-2.767312\n-2.657974\n-2.880908\n-2.567235\n-2.758240\n-2.605518\n-2.166444\n\n\ntarget\n-0.416187\n-0.164800\n-0.283554\n-0.534897\n-0.896808\n-0.456572\n-0.889556\n-1.178456\n-0.877891\n-1.176442\n...\n-6.094650\n-6.510793\n-6.408799\n-6.685696\n-6.672726\n-6.210781\n-6.377436\n-5.974001\n-5.755187\n-5.608240\n\n\n\n\n5 rows × 100 columns\n\n\n\n\nfrom tsai.data.validation import TrainValidTestSplitter\n\n\nseq_len = 100\nn_vars = 5\nt = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)\nprint(t.shape)\ncolumns=[f'var_{i}' for i in range(n_vars-1)]+['target']\ndf = pd.DataFrame(t, columns=columns)\ndisplay(df)\nX, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=True)(df)\nsplits = TrainValidTestSplitter(valid_size=.2, shuffle=False)(y)\nX.shape, y.shape, splits\n\n(100, 5)\n\n\n\n\n\n\n\n\n\nvar_0\nvar_1\nvar_2\nvar_3\ntarget\n\n\n\n\n0\n0.123248\n-0.081596\n0.099444\n0.447980\n-0.397975\n\n\n1\n0.469671\n-0.334499\n0.307867\n0.141345\n-0.131085\n\n\n2\n0.522902\n-0.696817\n0.386597\n0.156818\n0.128043\n\n\n3\n0.487025\n-0.966153\n-0.050574\n-0.248479\n-0.088962\n\n\n4\n0.396284\n-1.319821\n-0.113121\n-0.379227\n0.313690\n\n\n...\n...\n...\n...\n...\n...\n\n\n95\n6.138836\n-1.602917\n1.713049\n1.421797\n-1.873899\n\n\n96\n5.892472\n-1.896914\n1.401137\n1.065859\n-2.239942\n\n\n97\n5.421917\n-1.728568\n1.481270\n0.998533\n-2.157474\n\n\n98\n5.763120\n-1.404330\n1.931361\n1.295956\n-1.934397\n\n\n99\n5.827842\n-1.762438\n1.831712\n1.014259\n-1.831573\n\n\n\n\n100 rows × 5 columns\n\n\n\n((96, 4, 5),\n (96,),\n ((#77) [0,1,2,3,4,5,6,7,8,9...], (#19) [77,78,79,80,81,82,83,84,85,86...]))\n\n\n\ndata = np.concatenate([np.linspace(0, 1, 11).reshape(-1,1).repeat(2, 1), np.arange(11).reshape(-1,1)], -1)\ndf_test = pd.DataFrame(data, columns=['col1', 'col2', 'target'])\ndf_test['target'] = df_test['target'].astype(int)\ndf_test\n\n\n\n\n\n\n\n\ncol1\ncol2\ntarget\n\n\n\n\n0\n0.0\n0.0\n0\n\n\n1\n0.1\n0.1\n1\n\n\n2\n0.2\n0.2\n2\n\n\n3\n0.3\n0.3\n3\n\n\n4\n0.4\n0.4\n4\n\n\n5\n0.5\n0.5\n5\n\n\n6\n0.6\n0.6\n6\n\n\n7\n0.7\n0.7\n7\n\n\n8\n0.8\n0.8\n8\n\n\n9\n0.9\n0.9\n9\n\n\n10\n1.0\n1.0\n10\n\n\n\n\n\n\n\n\ndef _y_func(o): return o[:, 0]\n\n\nfor wl in np.arange(1, 20):\n x, y = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 'col2'], get_y=['target'], horizon=-wl, y_func=_y_func)(df_test)\n test_eq(x.shape[0], math.ceil((len(df_test))/wl))\n test_eq(x.shape[0], y.shape[0])\n test_eq(x.shape[2], wl)\n test_close(x[:, 0, 0]*10, y)\n\n\nfor wl in np.arange(1, 20):\n x, y = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 
'col2'], get_y=['target'], horizon=-wl, y_func=None)(df_test)\n test_eq(x.shape[0], math.ceil((len(df_test))/ wl))\n test_eq(x.shape[0], y.shape[0])\n test_eq(x.shape[2], wl)\n\n\nfor wl in np.arange(1, len(df_test)+1):\n x, y = SlidingWindow(wl, None, pad_remainder=False, get_x=['col1', 'col2'], get_y=['target'], horizon=-wl, y_func=None)(df_test)\n test_eq(x.shape[0], len(df_test) // wl)\n test_eq(x.shape[0], y.shape[0])\n test_eq(x.shape[2], wl)\n\n\nfor wl in np.arange(1, 20):\n x, _ = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 'col2'], get_y=[], horizon=0)(df_test)\n test_eq(x.shape[0], math.ceil((len(df_test))/wl))\n test_eq(x.shape[2], wl)\n\n\nfor wl in np.arange(2, len(df_test)):\n x, _ = SlidingWindow(wl, wl, pad_remainder=False, get_x=['col1', 'col2'], get_y=[], horizon=0)(df_test)\n test_eq(x.shape[0], len(df_test) // wl)\n test_eq(x.shape[2], wl)\n\n\ndf = pd.DataFrame()\ndf['sample_id'] = np.concatenate([np.ones(n)*(i + 1) for i,n in enumerate([13])])\ndf['var1'] = df['sample_id'] + df.index.values - 1\ndf['var2'] = df['var1'] * 10\ndf['target'] = (df['var1']).astype(int)\ndf['sample_id'] = df['sample_id'].astype(int)\ndf\n\n\n\n\n\n\n\n\nsample_id\nvar1\nvar2\ntarget\n\n\n\n\n0\n1\n0.0\n0.0\n0\n\n\n1\n1\n1.0\n10.0\n1\n\n\n2\n1\n2.0\n20.0\n2\n\n\n3\n1\n3.0\n30.0\n3\n\n\n4\n1\n4.0\n40.0\n4\n\n\n5\n1\n5.0\n50.0\n5\n\n\n6\n1\n6.0\n60.0\n6\n\n\n7\n1\n7.0\n70.0\n7\n\n\n8\n1\n8.0\n80.0\n8\n\n\n9\n1\n9.0\n90.0\n9\n\n\n10\n1\n10.0\n100.0\n10\n\n\n11\n1\n11.0\n110.0\n11\n\n\n12\n1\n12.0\n120.0\n12\n\n\n\n\n\n\n\n\nX, y = SlidingWindow(window_len=3, stride=2, start=3, pad_remainder=False, padding=\"pre\", padding_value=np.nan, add_padding_feature=False,\n get_x=[\"var1\", \"var2\"], get_y=[\"target\"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,\n ascending=True, check_leakage=True)(df)\ntest_eq(X.shape, (2, 2, 3))\ntest_eq(y.shape, (2, 4))\nX, y\n\n(array([[[ 4., 5., 6.],\n [40., 50., 60.]],\n \n [[ 6., 7., 8.],\n [60., 70., 80.]]]),\n array([[ 7, 8, 9, 10],\n [ 9, 10, 11, 12]]))\n\n\n\nX, y = SlidingWindow(window_len=3, stride=2, start=3, pad_remainder=True, padding=\"pre\", padding_value=np.nan, add_padding_feature=False,\n get_x=[\"var1\", \"var2\"], get_y=[\"target\"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,\n ascending=True, check_leakage=True)(df)\ntest_eq(X.shape, (3, 2, 3))\ntest_eq(y.shape, (3, 4))\nX, y\n\n(array([[[nan, 3., 4.],\n [nan, 30., 40.]],\n \n [[ 4., 5., 6.],\n [40., 50., 60.]],\n \n [[ 6., 7., 8.],\n [60., 70., 80.]]]),\n array([[ 5, 6, 7, 8],\n [ 7, 8, 9, 10],\n [ 9, 10, 11, 12]]))\n\n\n\nX, y = SlidingWindow(window_len=3, stride=2, start=3, pad_remainder=False, padding=\"post\", padding_value=np.nan, add_padding_feature=False,\n get_x=[\"var1\", \"var2\"], get_y=[\"target\"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,\n ascending=True, check_leakage=True)(df)\ntest_eq(X.shape, (2, 2, 3))\ntest_eq(y.shape, (2, 4))\nX, y\n\n(array([[[ 3., 4., 5.],\n [30., 40., 50.]],\n \n [[ 5., 6., 7.],\n [50., 60., 70.]]]),\n array([[ 6, 7, 8, 9],\n [ 8, 9, 10, 11]]))\n\n\n\nX, y = SlidingWindow(window_len=3, stride=2, start=3, pad_remainder=True, padding=\"post\", padding_value=np.nan, add_padding_feature=False,\n get_x=[\"var1\", \"var2\"], get_y=[\"target\"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,\n ascending=True, check_leakage=True)(df)\ntest_eq(X.shape, (3, 2, 
3))\ntest_eq(y.shape, (3, 4))\nX, y\n\n(array([[[ 3., 4., 5.],\n [30., 40., 50.]],\n \n [[ 5., 6., 7.],\n [50., 60., 70.]],\n \n [[ 7., 8., 9.],\n [70., 80., 90.]]]),\n array([[ 6., 7., 8., 9.],\n [ 8., 9., 10., 11.],\n [10., 11., 12., nan]]))\n\n\n\nX, y = SlidingWindow(window_len=10, stride=2, start=3, pad_remainder=True, padding=\"pre\", padding_value=np.nan, add_padding_feature=False,\n get_x=[\"var1\", \"var2\"], get_y=[\"target\"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,\n ascending=True, check_leakage=True)(df)\ntest_eq(X.shape, (1, 2, 10))\ntest_eq(y.shape, (1, 4))\nX, y\n\n(array([[[nan, nan, nan, nan, 3., 4., 5., 6., 7., 8.],\n [nan, nan, nan, nan, 30., 40., 50., 60., 70., 80.]]]),\n array([[ 9, 10, 11, 12]]))\n\n\n\nX, y = SlidingWindow(window_len=10, stride=2, start=3, pad_remainder=True, padding=\"post\", padding_value=np.nan, add_padding_feature=False,\n get_x=[\"var1\", \"var2\"], get_y=[\"target\"], y_func=None, output_processor=None, copy=False, horizon=4, seq_first=True, sort_by=None,\n ascending=True, check_leakage=True)(df)\ntest_eq(X.shape, (1, 2, 10))\ntest_eq(y.shape, (1, 4))\nX, y\n\n(array([[[ 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.],\n [ 30., 40., 50., 60., 70., 80., 90., 100., 110., 120.]]]),\n array([[nan, nan, nan, nan]]))\n\n\n\nsource\n\n\nSlidingWindowPanel\n\n SlidingWindowPanel (window_len:int, unique_id_cols:list,\n stride:Optional[int]=1, start:int=0,\n pad_remainder:bool=False, padding:str='post',\n padding_value:float=nan,\n add_padding_feature:bool=True,\n get_x:Union[NoneType,int,list]=None,\n get_y:Union[NoneType,int,list]=None,\n y_func:Optional[<built-infunctioncallable>]=None,\n output_processor:Optional[<built-\n infunctioncallable>]=None, copy:bool=False,\n horizon:Union[int,list]=1, seq_first:bool=True,\n sort_by:Optional[list]=None, ascending:bool=True,\n check_leakage:bool=True, return_key:bool=False,\n verbose:bool=True)\n\nApplies a sliding window to a pd.DataFrame.\nArgs: window_len = length of lookback window unique_id_cols = pd.DataFrame columns that will be used to identify a time series for each entity. stride = n datapoints the window is moved ahead along the sequence. Default: 1. If None, stride=window_len (no overlap) start = determines the step where the first window is applied: 0 (default), a given step (int), or random within the 1st stride (None). pad_remainder = allows to pad remainder subsequences when the sliding window is applied and get_y == [] (unlabeled data). padding = ‘pre’ or ‘post’ (optional, defaults to ‘pre’): pad either before or after each sequence. If pad_remainder == False, it indicates the starting point to create the sequence (‘pre’ from the end, and ‘post’ from the beginning) padding_value = value (float) that will be used for padding. Default: np.nan add_padding_feature = add an additional feature indicating whether each timestep is padded (1) or not (0). horizon = number of future datapoints to predict (y). If get_y is [] horizon will be set to 0. * 0 for last step in each sub-window. * n > 0 for a range of n future steps (1 to n). * n < 0 for a range of n past steps (-n + 1 to 0). * list : for those exact timesteps. get_x = indices of columns that contain the independent variable (xs). If None, all data will be used as x. get_y = indices of columns that contain the target (ys). If None, all data will be used as y. [] means no y data is created (unlabeled data). y_func = function to calculate the ys based on the get_y col/s and each y sub-window. 
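For intuition, the windowing behaviour shared by SlidingWindow and SlidingWindowPanel can be summarized with a short, illustrative NumPy sketch (this is not the tsai implementation: it ignores padding, get_x/get_y selection, y_func and the panel grouping, and simply takes the target from the first column):

import numpy as np

def naive_sliding_window(data, window_len, stride=1, horizon=1):
    # data: array of shape (seq_len, n_vars), seq_first layout
    # returns X: (n_windows, n_vars, window_len) and y taken `horizon` steps
    # after each window (horizon=0 -> last step inside the window)
    seq_len = len(data)
    X, y = [], []
    for start in range(0, seq_len - window_len - horizon + 1, stride):
        window = data[start:start + window_len]            # (window_len, n_vars)
        X.append(window.T)                                 # -> (n_vars, window_len)
        y.append(data[start + window_len + horizon - 1, 0])  # target from column 0 (cf. get_y)
    return np.stack(X), np.array(y)

t = np.arange(30).reshape(10, 3)                 # toy (seq_len=10, n_vars=3) input
X, y = naive_sliding_window(t, window_len=5, horizon=1)
print(X.shape, y.shape)                          # (5, 3, 5) (5,)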
y_func must be a function applied to axis=1! output_processor = optional function to filter output (X (and y if available)). This is useful when some values need to be removed. The function should take X and y (even if it’s None) as arguments. copy = copy the original object to avoid changes in it. seq_first = True if input shape (seq_len, n_vars), False if input shape (n_vars, seq_len) sort_by = column/s used for sorting the array in ascending order ascending = used in sorting check_leakage = checks if there’s leakage in the output between X and y return_key = when True, the key corresponsing to unique_id_cols for each sample is returned verbose = controls verbosity. True or 1 displays progress bar. 2 or more show records that cannot be created due to its length.\nInput: You can use np.ndarray, pd.DataFrame or torch.Tensor as input shape: (seq_len, ) or (seq_len, n_vars) if seq_first=True else (n_vars, seq_len)\n\nsamples = 100_000\nwl = 5\nn_vars = 10\n\nt = (torch.stack(n_vars * [torch.arange(samples)]).T * tensor([10**i for i in range(n_vars)]))\ndf = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])\ndf['time'] = np.arange(len(t))\ndf['device'] = 0\ndf['target'] = np.random.randint(0, 2, len(df))\ndf2 = df.copy()\ndf3 = df.copy()\ncols = ['var_0', 'var_1', 'var_2', 'device', 'target']\ndf2[cols] = df2[cols] + 1\ndf3[cols] = df3[cols] + 2\ndf2 = df2.loc[:3]\ndf['region'] = 'A'\ndf2['region'] = 'A'\ndf3['region'] = 'B'\ndf = pd.concat([df, df2, df3], ignore_index=True)\ndf['index'] = np.arange(len(df))\ndf = df.sample(frac=1).reset_index(drop=True)\ndisplay(df.head())\ndf.shape\n\n\n\n\n\n\n\n\nvar_0\nvar_1\nvar_2\nvar_3\nvar_4\nvar_5\nvar_6\nvar_7\nvar_8\nvar_9\ntime\ndevice\ntarget\nregion\nindex\n\n\n\n\n0\n86008\n860080\n8600800\n86008000\n860080000\n8600800000\n86008000000\n860080000000\n8600800000000\n86008000000000\n86008\n0\n0\nA\n86008\n\n\n1\n90003\n900012\n9000102\n90001000\n900010000\n9000100000\n90001000000\n900010000000\n9000100000000\n90001000000000\n90001\n2\n2\nB\n190005\n\n\n2\n43819\n438172\n4381702\n43817000\n438170000\n4381700000\n43817000000\n438170000000\n4381700000000\n43817000000000\n43817\n2\n3\nB\n143821\n\n\n3\n80751\n807492\n8074902\n80749000\n807490000\n8074900000\n80749000000\n807490000000\n8074900000000\n80749000000000\n80749\n2\n3\nB\n180753\n\n\n4\n84917\n849152\n8491502\n84915000\n849150000\n8491500000\n84915000000\n849150000000\n8491500000000\n84915000000000\n84915\n2\n3\nB\n184919\n\n\n\n\n\n\n\n(200004, 15)\n\n\n\nX, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'], \n horizon=0, seq_first=True, sort_by=['time'], ascending=True, return_key=False)(df)\nX.shape, y.shape\n\nprocessing data...\n...data processed\nconcatenating X...\n...X concatenated\nconcatenating y...\n...y concatenated\n\n\n\n\n\n\n\n\n\n((199992, 10, 5), (199992,))\n\n\n\nX, y, key = SlidingWindowPanel(window_len=5, unique_id_cols=['device'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'], \n horizon=0, seq_first=True, sort_by=['time'], ascending=True, return_key=True)(df)\nX.shape, y.shape, key.shape\n\nprocessing data...\n...data processed\nconcatenating X...\n...X concatenated\nconcatenating y...\n...y concatenated\n\n\n\n\n\n\n\n\n\n((199992, 10, 5), (199992,), (199992,))\n\n\n\nX, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device', 'region'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'], \n horizon=0, seq_first=True, sort_by=['time'], 
ascending=True)(df)\nX.shape, y.shape\n\nprocessing data...\n...data processed\nconcatenating X...\n...X concatenated\nconcatenating y...\n...y concatenated\n\n\n\n\n\n\n\n\n\n((199992, 10, 5), (199992,))\n\n\n\n# y_func must be a function applied to axis=1!\ndef y_max(o): return np.max(o, axis=1)\n\n\nX, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device', 'region'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'], \n y_func=y_max, horizon=5, seq_first=True, sort_by=['time'], ascending=True)(df)\nX.shape, y.shape\n\nprocessing data...\n...data processed\nconcatenating X...\n...X concatenated\nconcatenating y...\n...y concatenated\n\n\n\n\n\n\n\n \n \n 0.00% [0/3 00:00<?]\n \n \n\n\n((199982, 10, 5), (199982,))\n\n\n\nsource\n\n\nidentify_padding\n\n identify_padding (float_mask, value=-1)\n\nIdentifies padded subsequences in a mask of type float\nThis function identifies as padded subsequences those where all values == nan from the end of the sequence (last dimension) across all channels, and sets those values to the selected value (default = -1)\nArgs: mask: boolean or float mask value: scalar that will be used to identify padded subsequences\n\nwl = 5\nstride = 5\n\nt = np.repeat(np.arange(13).reshape(-1,1), 3, axis=-1)\nprint('input shape:', t.shape)\nX, _ = SlidingWindow(wl, stride=stride, pad_remainder=True, get_y=[])(t)\nX = tensor(X)\nX[0, 1, -2:] = np.nan\nX[1,..., :3] = np.nan\nprint(X)\nidentify_padding(torch.isnan(X).float())\n\ninput shape: (13, 3)\ntensor([[[ 0., 1., 2., 3., 4.],\n [ 0., 1., 2., nan, nan],\n [ 0., 1., 2., 3., 4.],\n [ 0., 0., 0., 0., 0.]],\n\n [[nan, nan, nan, 8., 9.],\n [nan, nan, nan, 8., 9.],\n [nan, nan, nan, 8., 9.],\n [nan, nan, nan, 0., 0.]],\n\n [[10., 11., 12., nan, nan],\n [10., 11., 12., nan, nan],\n [10., 11., 12., nan, nan],\n [ 0., 0., 0., 1., 1.]]])\n\n\ntensor([[[0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 1.],\n [0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.]],\n\n [[1., 1., 1., 0., 0.],\n [1., 1., 1., 0., 0.],\n [1., 1., 1., 0., 0.],\n [1., 1., 1., 0., 0.]],\n\n [[0., 0., 0., 1., 1.],\n [0., 0., 0., 1., 1.],\n [0., 0., 0., 1., 1.],\n [0., 0., 0., 0., 0.]]])\n\n\n\n\nForecasting data preparation\n\nsource\n\nbasic_data_preparation_fn\n\n basic_data_preparation_fn (df, drop_duplicates=True, datetime_col=None,\n use_index=False, keep='last',\n add_missing_datetimes=True, freq='1D',\n method=None, sort_by=None)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe to preprocess\n\n\ndrop_duplicates\nbool\nTrue\nflag to indicate if rows with duplicate datetime info should be removed\n\n\ndatetime_col\nNoneType\nNone\nstr indicating the name of the column/s that contains the datetime info\n\n\nuse_index\nbool\nFalse\nflag to indicate if the datetime info is in the index\n\n\nkeep\nstr\nlast\nstr to indicate what data should be kept in case of duplicate rows\n\n\nadd_missing_datetimes\nbool\nTrue\nflaf to indicate if missing datetimes should be added\n\n\nfreq\nstr\n1D\nstr to indicate the frequency used in the datetime info. Used in case missing timestamps exists\n\n\nmethod\nNoneType\nNone\nstr indicating the method used to fill data for missing timestamps: None, ‘bfill’, ‘ffill’\n\n\nsort_by\nNoneType\nNone\nstr or list of str to indicate if how to sort data. 
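The steps described above map to standard pandas operations. A minimal sketch of the same sequence (drop duplicated timestamps, reindex to a complete date range at the given freq, fill the new rows), assuming the datetime info lives in a column rather than the index; this mirrors the documented behaviour, not the actual implementation:

import pandas as pd

def naive_data_preparation(df, datetime_col, freq="1D", keep="last", method="ffill"):
    # illustrative only
    df = df.copy()
    df[datetime_col] = pd.to_datetime(df[datetime_col])
    # drop rows with a duplicated timestamp, keeping the last occurrence
    df = df.drop_duplicates(subset=datetime_col, keep=keep)
    # add missing timestamps at the requested frequency
    full_range = pd.date_range(df[datetime_col].min(), df[datetime_col].max(), freq=freq)
    df = df.set_index(datetime_col).sort_index().reindex(full_range)
    # fill the newly created rows; method=None would leave NaNs instead
    if method is not None:
        df = df.ffill() if method == "ffill" else df.bfill()
    return df.rename_axis(datetime_col).reset_index()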
If use_index=True the index will be used to sort the dataframe.\n\n\n\n\ndf_len = 100\ndatetime_col = 'datetime' \ndf = pd.DataFrame(np.arange(df_len), columns=['value'])\ndf['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df_len, freq='1D')\ndf['type'] = 1\n# drop 10 rows at random\ndf = df.drop(df.sample(10).index)\n# add 2 duplicated rows\ndf = pd.concat([df, df.sample(2)])\ndisplay(df)\n\nnew_df = basic_data_preparation_fn(df, drop_duplicates=True, datetime_col=datetime_col, use_index=False, keep='last', \n add_missing_datetimes=True, freq='1D', method='ffill', sort_by=datetime_col)\ndisplay(new_df)\n\n\n\n\n\n\n\n\nvalue\ndatetime\ntype\n\n\n\n\n0\n0\n1749-03-31\n1\n\n\n1\n1\n1749-04-01\n1\n\n\n3\n3\n1749-04-03\n1\n\n\n4\n4\n1749-04-04\n1\n\n\n5\n5\n1749-04-05\n1\n\n\n...\n...\n...\n...\n\n\n96\n96\n1749-07-05\n1\n\n\n97\n97\n1749-07-06\n1\n\n\n99\n99\n1749-07-08\n1\n\n\n0\n0\n1749-03-31\n1\n\n\n19\n19\n1749-04-19\n1\n\n\n\n\n92 rows × 3 columns\n\n\n\n\n\n\n\n\n\n\nvalue\ndatetime\ntype\n\n\n\n\n0\n0\n1749-03-31\n1\n\n\n1\n1\n1749-04-01\n1\n\n\n2\n1\n1749-04-02\n1\n\n\n3\n3\n1749-04-03\n1\n\n\n4\n4\n1749-04-04\n1\n\n\n...\n...\n...\n...\n\n\n95\n95\n1749-07-04\n1\n\n\n96\n96\n1749-07-05\n1\n\n\n97\n97\n1749-07-06\n1\n\n\n98\n97\n1749-07-07\n1\n\n\n99\n99\n1749-07-08\n1\n\n\n\n\n100 rows × 3 columns\n\n\n\n\nsource\n\n\ncheck_safe_conversion\n\n check_safe_conversion (o, dtype='float32', cols=None)\n\nChecks if the conversion to float is safe\n\nassert check_safe_conversion(-2**11, 'float16') == True\nassert check_safe_conversion(-2**11 - 1, 'float16') == False\nassert check_safe_conversion(2**24, 'float32') == True\nassert check_safe_conversion(2**24+1, 'float32') == False\nassert check_safe_conversion(2**53, 'float64') == True\nassert check_safe_conversion(2**53+1, 'float64') == False\n\ndf = pd.DataFrame({'a': [1, 2, 3], 'b': [2**24, 2**24+1, 2**24+2]})\nassert not check_safe_conversion(df, 'float32')\nassert check_safe_conversion(df, 'int32')\nassert check_safe_conversion(df, 'float32', cols='a')\nassert not check_safe_conversion(df, 'float32', cols='b')\n\n-2147483648 1 3 2147483647\n-2147483648 16777216 16777218 2147483647\n\n\n/var/folders/42/4hhwknbd5kzcbq48tmy_gbp00000gn/T/ipykernel_30986/657350933.py:39: UserWarning: Unsafe conversion to float32: {'a': True, 'b': False}\n warnings.warn(f\"Unsafe conversion to {dtype}: {dict(zip(cols, checks))}\")\n/var/folders/42/4hhwknbd5kzcbq48tmy_gbp00000gn/T/ipykernel_30986/657350933.py:39: UserWarning: Unsafe conversion to float32: {'b': False}\n warnings.warn(f\"Unsafe conversion to {dtype}: {dict(zip(cols, checks))}\")\n\n\n\nsource\n\n\nprepare_forecasting_data\n\nfrom tsai.data.validation import get_forecasting_splits\n\n\nfcst_history = 10 \nfcst_horizon = 5\nstride = 1\nvalid_size=0.2\ntest_size=0.2\n\ndf = pd.DataFrame()\ndf['target'] = np.arange(50)\n\nX, y = prepare_forecasting_data(df, fcst_history, fcst_horizon)\nsplits = get_forecasting_splits(df, fcst_history, fcst_horizon, valid_size=valid_size, test_size=test_size, stride=stride, show_plot=False)\nassert y[splits[0]][-1][0][-1] == y[splits[1]][0][0][0] - stride\nassert y[splits[1]][-1][0][-1] == y[splits[2]][0][0][0] - stride\nfor s,t in zip(splits, ['\\ntrain_split:', '\\nvalid_split:', '\\ntest_split :']):\n print(t)\n for xi, yi in zip(X[s], y[s]):\n print(xi, yi)\n\n\ntrain_split:\n[[0 1 2 3 4 5 6 7 8 9]] [[10 11 12 13 14]]\n[[ 1 2 3 4 5 6 7 8 9 10]] [[11 12 13 14 15]]\n[[ 2 3 4 5 6 7 8 9 10 11]] [[12 13 14 15 16]]\n[[ 3 4 5 6 7 8 9 10 11 12]] [[13 
14 15 16 17]]\n[[ 4 5 6 7 8 9 10 11 12 13]] [[14 15 16 17 18]]\n[[ 5 6 7 8 9 10 11 12 13 14]] [[15 16 17 18 19]]\n[[ 6 7 8 9 10 11 12 13 14 15]] [[16 17 18 19 20]]\n[[ 7 8 9 10 11 12 13 14 15 16]] [[17 18 19 20 21]]\n[[ 8 9 10 11 12 13 14 15 16 17]] [[18 19 20 21 22]]\n[[ 9 10 11 12 13 14 15 16 17 18]] [[19 20 21 22 23]]\n[[10 11 12 13 14 15 16 17 18 19]] [[20 21 22 23 24]]\n[[11 12 13 14 15 16 17 18 19 20]] [[21 22 23 24 25]]\n[[12 13 14 15 16 17 18 19 20 21]] [[22 23 24 25 26]]\n[[13 14 15 16 17 18 19 20 21 22]] [[23 24 25 26 27]]\n[[14 15 16 17 18 19 20 21 22 23]] [[24 25 26 27 28]]\n[[15 16 17 18 19 20 21 22 23 24]] [[25 26 27 28 29]]\n\nvalid_split:\n[[20 21 22 23 24 25 26 27 28 29]] [[30 31 32 33 34]]\n[[21 22 23 24 25 26 27 28 29 30]] [[31 32 33 34 35]]\n[[22 23 24 25 26 27 28 29 30 31]] [[32 33 34 35 36]]\n[[23 24 25 26 27 28 29 30 31 32]] [[33 34 35 36 37]]\n[[24 25 26 27 28 29 30 31 32 33]] [[34 35 36 37 38]]\n[[25 26 27 28 29 30 31 32 33 34]] [[35 36 37 38 39]]\n\ntest_split :\n[[30 31 32 33 34 35 36 37 38 39]] [[40 41 42 43 44]]\n[[31 32 33 34 35 36 37 38 39 40]] [[41 42 43 44 45]]\n[[32 33 34 35 36 37 38 39 40 41]] [[42 43 44 45 46]]\n[[33 34 35 36 37 38 39 40 41 42]] [[43 44 45 46 47]]\n[[34 35 36 37 38 39 40 41 42 43]] [[44 45 46 47 48]]\n[[35 36 37 38 39 40 41 42 43 44]] [[45 46 47 48 49]]\n\n\n\nfcst_history = 10 \nfcst_horizon = 5\nstride = 1\nvalid_size=0.2\ntest_size=0.2\n\ndf = pd.DataFrame()\ndf['target'] = np.arange(50)\n\nX, y = prepare_forecasting_data(df, fcst_history, fcst_horizon, x_vars=None, y_vars=[])\nsplits = get_forecasting_splits(df, fcst_history, fcst_horizon, valid_size=valid_size, test_size=test_size, stride=stride, show_plot=False)\nassert y is None\n\n\ndf_len = 100\nn_values = 3\ndatetime_col = 'datetime' \ndf = pd.DataFrame()\nfor i in range(n_values):\n df[f\"value_{i}\"] = (np.arange(df_len) * 10**i).astype(np.float32)\ndisplay(df)\n\nfcst_history = 10\nfcst_horizon = 5\nx_vars = df.columns\ny_vars = None\ndtype = None\n\nX, y = prepare_forecasting_data(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, x_vars=x_vars, y_vars=y_vars, dtype=dtype)\ntest_eq(X.shape, (86, 3, 10))\ntest_eq(y.shape, (86, 3, 5))\ntest_eq(y[:3, :, 0], X[:3, :, -1] + np.array([1, 10, 100]).reshape(1, 1, -1))\nprint(X[:3].astype(int))\nprint(y[:3].astype(int))\n\n\n\n\n\n\n\n\nvalue_0\nvalue_1\nvalue_2\n\n\n\n\n0\n0.0\n0.0\n0.0\n\n\n1\n1.0\n10.0\n100.0\n\n\n2\n2.0\n20.0\n200.0\n\n\n3\n3.0\n30.0\n300.0\n\n\n4\n4.0\n40.0\n400.0\n\n\n...\n...\n...\n...\n\n\n95\n95.0\n950.0\n9500.0\n\n\n96\n96.0\n960.0\n9600.0\n\n\n97\n97.0\n970.0\n9700.0\n\n\n98\n98.0\n980.0\n9800.0\n\n\n99\n99.0\n990.0\n9900.0\n\n\n\n\n100 rows × 3 columns\n\n\n\n[[[ 0 1 2 3 4 5 6 7 8 9]\n [ 0 10 20 30 40 50 60 70 80 90]\n [ 0 100 200 300 400 500 600 700 800 900]]\n\n [[ 1 2 3 4 5 6 7 8 9 10]\n [ 10 20 30 40 50 60 70 80 90 100]\n [ 100 200 300 400 500 600 700 800 900 1000]]\n\n [[ 2 3 4 5 6 7 8 9 10 11]\n [ 20 30 40 50 60 70 80 90 100 110]\n [ 200 300 400 500 600 700 800 900 1000 1100]]]\n[[[ 10 11 12 13 14]\n [ 100 110 120 130 140]\n [1000 1100 1200 1300 1400]]\n\n [[ 11 12 13 14 15]\n [ 110 120 130 140 150]\n [1100 1200 1300 1400 1500]]\n\n [[ 12 13 14 15 16]\n [ 120 130 140 150 160]\n [1200 1300 1400 1500 1600]]]\n\n\n\ndf_len = 100\nn_values = 3\ndatetime_col = 'datetime' \ndf = pd.DataFrame()\nfor i in range(n_values):\n df[f\"value_{i}\"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)\n\ndf['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df_len, freq='1D')\ndf['type'] = 
np.random.randint(0, 4, df_len)\ndf['target'] = np.arange(df_len)\ndisplay(df)\n\nfcst_history = 10\nfcst_horizon = 5\nx_vars = ['value_0', 'value_1', 'value_2', 'target']\ny_vars = 'target'\ndtype = np.float32\n\nX, y = prepare_forecasting_data(df, fcst_history=fcst_history, fcst_horizon=fcst_horizon, x_vars=x_vars, y_vars=y_vars, dtype=dtype)\ntest_eq(X.shape, (86, 4, 10))\ntest_eq(y.shape, (86, 1, 5))\nprint(X[:3].astype(int))\nprint(y[:3])\n\n\n\n\n\n\n\n\nvalue_0\nvalue_1\nvalue_2\ndatetime\ntype\ntarget\n\n\n\n\n0\n0.0\n0.0\n0.0\n1749-03-31\n3\n0\n\n\n1\n10.0\n100.0\n1000.0\n1749-04-01\n1\n1\n\n\n2\n20.0\n200.0\n2000.0\n1749-04-02\n1\n2\n\n\n3\n30.0\n300.0\n3000.0\n1749-04-03\n1\n3\n\n\n4\n40.0\n400.0\n4000.0\n1749-04-04\n2\n4\n\n\n...\n...\n...\n...\n...\n...\n...\n\n\n95\n950.0\n9500.0\n95000.0\n1749-07-04\n0\n95\n\n\n96\n960.0\n9600.0\n96000.0\n1749-07-05\n0\n96\n\n\n97\n970.0\n9700.0\n97000.0\n1749-07-06\n3\n97\n\n\n98\n980.0\n9800.0\n98000.0\n1749-07-07\n2\n98\n\n\n99\n990.0\n9900.0\n99000.0\n1749-07-08\n1\n99\n\n\n\n\n100 rows × 6 columns\n\n\n\n[[[ 0 10 20 30 40 50 60 70 80 90]\n [ 0 100 200 300 400 500 600 700 800 900]\n [ 0 1000 2000 3000 4000 5000 6000 7000 8000 9000]\n [ 0 1 2 3 4 5 6 7 8 9]]\n\n [[ 10 20 30 40 50 60 70 80 90 100]\n [ 100 200 300 400 500 600 700 800 900 1000]\n [ 1000 2000 3000 4000 5000 6000 7000 8000 9000 10000]\n [ 1 2 3 4 5 6 7 8 9 10]]\n\n [[ 20 30 40 50 60 70 80 90 100 110]\n [ 200 300 400 500 600 700 800 900 1000 1100]\n [ 2000 3000 4000 5000 6000 7000 8000 9000 10000 11000]\n [ 2 3 4 5 6 7 8 9 10 11]]]\n[[[10. 11. 12. 13. 14.]]\n\n [[11. 12. 13. 14. 15.]]\n\n [[12. 13. 14. 15. 16.]]]\n\n\n\nsource\n\n\nget_today\n\n get_today (datetime_format='%Y-%m-%d')\n\n\ntest_eq(get_today(), dt.datetime.today().strftime(\"%Y-%m-%d\"))\n\n\nsource\n\n\nsplit_fcst_datetime\n\n split_fcst_datetime (fcst_datetime)\n\nDefine fcst start and end dates\n\n\n\n\nDetails\n\n\n\n\nfcst_datetime\nstr or list of str with datetime\n\n\n\n\ntest_eq(split_fcst_datetime(None), (None, None))\ntest_eq(split_fcst_datetime('2020-01-01'), ('2020-01-01', '2020-01-01'))\ntest_eq(split_fcst_datetime(['2019-01-01', '2020-01-01']), ['2019-01-01', '2020-01-01'])\n\n\nsource\n\n\nset_df_datetime\n\n set_df_datetime (df, datetime_col=None, use_index=False)\n\nMake sure datetime column or index is of the right date type.\n\n# Test\ndf_len = 100\nn_values = 3\ndatetime_col = 'datetime'\ndf = pd.DataFrame()\nfor i in range(n_values):\n df[f\"value_{i}\"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)\ndf['datetime'] = pd.date_range(pd.to_datetime('1749-03-31'), periods=df_len, freq='1D')\nset_df_datetime(df, datetime_col=datetime_col)\ntest_eq(df['datetime'].dtypes, np.dtype('datetime64[ns]'))\ndf_index = df.set_index('datetime')\nset_df_datetime(df_index, use_index=True)\ntest_eq(df_index.index.dtype, np.dtype('datetime64[ns]'))\n\n\nsource\n\n\nget_df_datetime_bounds\n\n get_df_datetime_bounds (df, datetime_col=None, use_index=False)\n\nReturns the start date and and dates used by the forecast\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe containing forecasting data\n\n\ndatetime_col\nNoneType\nNone\nstr data column containing the datetime\n\n\nuse_index\nbool\nFalse\nbool flag to indicate if index should be used to get column\n\n\n\n\n# Test\ndf_len = 100\nn_values = 3\ndatetime_col = 'datetime'\ndf = pd.DataFrame()\nfor i in range(n_values):\n df[f\"value_{i}\"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)\ndf['datetime'] = 
pd.date_range(pd.to_datetime('1749-03-31'), periods=df_len, freq='1D')\ntest_eq(get_df_datetime_bounds(df, datetime_col=datetime_col), (df['datetime'].min(), df['datetime'].max()))\ndf_index = df.set_index('datetime')\ntest_eq(get_df_datetime_bounds(df_index, use_index=True), (df_index.index.min(), df_index.index.max()))\n\n\nsource\n\n\nget_fcst_bounds\n\n get_fcst_bounds (df, fcst_datetime, fcst_history=None, fcst_horizon=None,\n freq='D', datetime_format='%Y-%m-%d', datetime_col=None,\n use_index=False)\n\nReturns the start and end datetimes used by the forecast\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe containing forecasting data\n\n\nfcst_datetime\n\n\ndatetime for which a fcst is created. Optionally tuple of datatimes if the fcst is created for a range of dates.\n\n\nfcst_history\nNoneType\nNone\n# steps used as input\n\n\nfcst_horizon\nNoneType\nNone\n# predicted steps\n\n\nfreq\nstr\nD\ndatetime units. May contain a letters only or a combination of ints + letters: eg. “7D”\n\n\ndatetime_format\nstr\n%Y-%m-%d\nformat used to convert “today”\n\n\ndatetime_col\nNoneType\nNone\nstr data column containing the datetime\n\n\nuse_index\nbool\nFalse\nbool flag to indicate if index should be used to get column\n\n\n\n\nfrom datetime import timedelta\n\n\n# Test\ndf_len = 100\nn_values = 3\ndatetime_col = 'datetime'\ndf = pd.DataFrame()\nfor i in range(n_values):\n df[f\"value_{i}\"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)\nfreq = \"7D\"\ntoday = pd.Timestamp(get_today()).floor(freq)\ndf['datetime'] = pd.date_range(None, today, periods=df_len, freq=freq)\ndisplay(df)\nmax_dt = pd.Timestamp(df['datetime'].max()).floor(freq)\nfcst_history = 30\nfcst_horizon = 10\nfcst_datetime = max_dt - timedelta(weeks=fcst_horizon)\nprint('fcst_datetime :', fcst_datetime)\nstart_datetime, end_datetime = get_fcst_bounds(df, fcst_datetime, datetime_col=datetime_col, fcst_history=fcst_history, fcst_horizon=fcst_horizon, freq=freq)\nprint('start_datetime:', start_datetime)\nprint('end_datetime :', end_datetime)\ndates = pd.date_range(start_datetime, end_datetime, freq=freq)\nprint(dates)\ntest_eq(len(dates), fcst_history + fcst_horizon)\ntest_eq(end_datetime, max_dt)\n\n\n\n\n\n\n\n\nvalue_0\nvalue_1\nvalue_2\ndatetime\n\n\n\n\n0\n0.0\n0.0\n0.0\n2021-11-25\n\n\n1\n10.0\n100.0\n1000.0\n2021-12-02\n\n\n2\n20.0\n200.0\n2000.0\n2021-12-09\n\n\n3\n30.0\n300.0\n3000.0\n2021-12-16\n\n\n4\n40.0\n400.0\n4000.0\n2021-12-23\n\n\n...\n...\n...\n...\n...\n\n\n95\n950.0\n9500.0\n95000.0\n2023-09-21\n\n\n96\n960.0\n9600.0\n96000.0\n2023-09-28\n\n\n97\n970.0\n9700.0\n97000.0\n2023-10-05\n\n\n98\n980.0\n9800.0\n98000.0\n2023-10-12\n\n\n99\n990.0\n9900.0\n99000.0\n2023-10-19\n\n\n\n\n100 rows × 4 columns\n\n\n\nfcst_datetime : 2023-08-10 00:00:00\nstart_datetime: 2023-01-19 00:00:00\nend_datetime : 2023-10-19 00:00:00\nDatetimeIndex(['2023-01-19', '2023-01-26', '2023-02-02', '2023-02-09',\n '2023-02-16', '2023-02-23', '2023-03-02', '2023-03-09',\n '2023-03-16', '2023-03-23', '2023-03-30', '2023-04-06',\n '2023-04-13', '2023-04-20', '2023-04-27', '2023-05-04',\n '2023-05-11', '2023-05-18', '2023-05-25', '2023-06-01',\n '2023-06-08', '2023-06-15', '2023-06-22', '2023-06-29',\n '2023-07-06', '2023-07-13', '2023-07-20', '2023-07-27',\n '2023-08-03', '2023-08-10', '2023-08-17', '2023-08-24',\n '2023-08-31', '2023-09-07', '2023-09-14', '2023-09-21',\n '2023-09-28', '2023-10-05', '2023-10-12', '2023-10-19'],\n dtype='datetime64[ns]', freq='7D')\n\n\n\nsource\n\n\nfilter_df_by_datetime\n\n 
filter_df_by_datetime (df, start_datetime=None, end_datetime=None,\n datetime_col=None, use_index=False)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe containing forecasting data\n\n\nstart_datetime\nNoneType\nNone\nlower datetime bound\n\n\nend_datetime\nNoneType\nNone\nupper datetime bound\n\n\ndatetime_col\nNoneType\nNone\nstr data column containing the datetime\n\n\nuse_index\nbool\nFalse\nbool flag to indicate if index should be used to get column\n\n\n\n\n# Test\ndf_len = 100\nn_values = 3\ndatetime_col = 'datetime'\ndf = pd.DataFrame()\nfor i in range(n_values):\n df[f\"value_{i}\"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)\nfreq = \"7D\"\ndf['datetime'] = pd.date_range(None, pd.Timestamp(get_today()).floor(freq), periods=df_len, freq=freq)\ndisplay(df)\nmax_dt = pd.Timestamp(df['datetime'].max()).floor(freq)\nfcst_history = 30\nfcst_horizon = 10\nfcst_datetime = pd.date_range(end=fcst_datetime, periods=fcst_horizon + 1, freq=freq).floor(freq)[-1]\nstart_datetime, end_datetime = get_fcst_bounds(df, fcst_datetime, datetime_col=datetime_col, fcst_history=fcst_history, fcst_horizon=fcst_horizon, freq=freq)\ntest_eq(len(filter_df_by_datetime(df, start_datetime=start_datetime, end_datetime=end_datetime, datetime_col=datetime_col)), fcst_history + fcst_horizon)\n\n\n\n\n\n\n\n\nvalue_0\nvalue_1\nvalue_2\ndatetime\n\n\n\n\n0\n0.0\n0.0\n0.0\n2021-11-25\n\n\n1\n10.0\n100.0\n1000.0\n2021-12-02\n\n\n2\n20.0\n200.0\n2000.0\n2021-12-09\n\n\n3\n30.0\n300.0\n3000.0\n2021-12-16\n\n\n4\n40.0\n400.0\n4000.0\n2021-12-23\n\n\n...\n...\n...\n...\n...\n\n\n95\n950.0\n9500.0\n95000.0\n2023-09-21\n\n\n96\n960.0\n9600.0\n96000.0\n2023-09-28\n\n\n97\n970.0\n9700.0\n97000.0\n2023-10-05\n\n\n98\n980.0\n9800.0\n98000.0\n2023-10-12\n\n\n99\n990.0\n9900.0\n99000.0\n2023-10-19\n\n\n\n\n100 rows × 4 columns\n\n\n\n\nsource\n\n\nget_fcst_data_from_df\n\n get_fcst_data_from_df (df, fcst_datetime, fcst_history=None,\n fcst_horizon=None, freq='D',\n datetime_format='%Y-%m-%d', datetime_col=None,\n use_index=False)\n\nGet forecasting data from a dataframe\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndf\n\n\ndataframe containing forecasting data\n\n\nfcst_datetime\n\n\ndatetime for which a fcst is created. Optionally tuple of datatimes if the fcst is created for a range of dates.\n\n\nfcst_history\nNoneType\nNone\n# steps used as input\n\n\nfcst_horizon\nNoneType\nNone\n# predicted steps\n\n\nfreq\nstr\nD\ndatetime units. May contain a letters only or a combination of ints + letters: eg. 
“7D”\n\n\ndatetime_format\nstr\n%Y-%m-%d\nformat used to convert “today”\n\n\ndatetime_col\nNoneType\nNone\nstr data column containing the datetime\n\n\nuse_index\nbool\nFalse\nbool flag to indicate if index should be used to get column\n\n\n\n\n# Test\ndf_len = 100\nn_values = 3\ndatetime_col = 'datetime'\ndf = pd.DataFrame()\nfor i in range(n_values):\n df[f\"value_{i}\"] = (np.arange(df_len) * 10**(i + 1)).astype(np.float32)\nfreq = \"7D\"\ndf['datetime'] = pd.date_range(None, pd.Timestamp(get_today()).floor(freq), periods=df_len, freq=freq)\ndisplay(df)\nmax_dt = pd.Timestamp(df['datetime'].max()).floor(freq)\nfcst_history = 30\nfcst_horizon = 10\nfcst_datetime = pd.date_range(end=fcst_datetime, periods=fcst_horizon + 1, freq=freq).floor(freq)[-1]\ntest_eq(len(get_fcst_data_from_df(df, fcst_datetime, fcst_history=fcst_history, fcst_horizon=fcst_horizon, freq=freq, datetime_col=datetime_col)), \n fcst_history + fcst_horizon)\n\n\n\n\n\n\n\n\nvalue_0\nvalue_1\nvalue_2\ndatetime\n\n\n\n\n0\n0.0\n0.0\n0.0\n2021-11-25\n\n\n1\n10.0\n100.0\n1000.0\n2021-12-02\n\n\n2\n20.0\n200.0\n2000.0\n2021-12-09\n\n\n3\n30.0\n300.0\n3000.0\n2021-12-16\n\n\n4\n40.0\n400.0\n4000.0\n2021-12-23\n\n\n...\n...\n...\n...\n...\n\n\n95\n950.0\n9500.0\n95000.0\n2023-09-21\n\n\n96\n960.0\n9600.0\n96000.0\n2023-09-28\n\n\n97\n970.0\n9700.0\n97000.0\n2023-10-05\n\n\n98\n980.0\n9800.0\n98000.0\n2023-10-12\n\n\n99\n990.0\n9900.0\n99000.0\n2023-10-19\n\n\n\n\n100 rows × 4 columns", + "crumbs": [ + "Data", + "Data preparation" + ] + }, + { + "objectID": "models.tabtransformer.html", + "href": "models.tabtransformer.html", + "title": "TabTransformer", + "section": "", + "text": "This is an unofficial TabTransformer Pytorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co)\nHuang, X., Khetan, A., Cvitkovic, M., & Karnin, Z. (2020). TabTransformer: Tabular Data Modeling Using Contextual Embeddings. arXiv preprint https://arxiv.org/pdf/2012.06678\nOfficial repo: https://github.com/awslabs/autogluon/tree/master/tabular/src/autogluon/tabular/models/tab_transformer\n\nsource\n\nTabTransformer\n\n TabTransformer (classes, cont_names, c_out, column_embed=True,\n add_shared_embed=False, shared_embed_div=8,\n embed_dropout=0.1, drop_whole_embed=False, d_model=32,\n n_layers=6, n_heads=8, d_k=None, d_v=None, d_ff=None,\n res_attention=True, attention_act='gelu',\n res_dropout=0.1, norm_cont=True, mlp_mults=(4, 2),\n mlp_dropout=0.0, mlp_act=None, mlp_skip=False,\n mlp_bn=False, bn_final=False)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. 
:vartype training: bool\n\nsource\n\n\nFullEmbeddingDropout\n\n FullEmbeddingDropout (dropout:float)\n\nFrom https://github.com/jrzaurin/pytorch-widedeep/blob/be96b57f115e4a10fde9bb82c35380a3ac523f52/pytorch_widedeep/models/tab_transformer.py#L153\n\nsource\n\n\nSharedEmbedding\n\n SharedEmbedding (num_embeddings, embedding_dim, shared_embed=True,\n add_shared_embed=False, shared_embed_div=8)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nifnone\n\n ifnone (a, b)\n\nb if a is None else a\n\nfrom fastai.tabular.all import *\n\n\npath = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')\ndls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names=\"salary\",\n cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'],\n cont_names = ['age', 'fnlwgt', 'education-num'],\n procs = [Categorify, FillMissing, Normalize])\nx_cat, x_cont, yb = first(dls.train)\nmodel = TabTransformer(dls.classes, dls.cont_names, dls.c)\ntest_eq(model(x_cat, x_cont).shape, (dls.train.bs, dls.c))", + "crumbs": [ + "Models", + "Tabular models", + "TabTransformer" + ] + }, + { + "objectID": "models.multimodal.html", + "href": "models.multimodal.html", + "title": "Multimodal", + "section": "", + "text": "Functionality used for multiple data modalities.\n\nA common scenario in time-series related tasks is the use of multiple types of inputs:\n\nstatic: data that doesn’t change with time\nobserved: temporal data only available in the past\nknown: temporal data available in the past and in the future\n\nAt the same time, these different modalities may contain:\n\ncategorical data\ncontinuous or numerical data\n\nBased on that, there are situations where we have up to 6 different types of input features:\n\ns_cat: static continuous variables\no_cat: observed categorical variables\no_cont: observed continuous variables\nk_cat: known categorical variables\nk_cont: known continuous variables\n\n\nsource\n\nget_feat_idxs\n\n get_feat_idxs (c_in, s_cat_idxs=None, s_cont_idxs=None, o_cat_idxs=None,\n o_cont_idxs=None)\n\nCalculate the indices of the features used for training.\n\nsource\n\n\nget_o_cont_idxs\n\n get_o_cont_idxs (c_in, s_cat_idxs=None, s_cont_idxs=None,\n o_cat_idxs=None)\n\nCalculate the indices of the observed continuous features.\n\nc_in = 7\ns_cat_idxs = 3\ns_cont_idxs = [1, 4, 5]\no_cat_idxs = None\no_cont_idxs = None\n\ns_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs = get_feat_idxs(c_in, s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs, o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs)\n\ntest_eq(s_cat_idxs, [3])\ntest_eq(s_cont_idxs, [1, 4, 5])\ntest_eq(o_cat_idxs, [])\ntest_eq(o_cont_idxs, [0, 2, 
6])\n\n\nsource\n\n\nTensorSplitter\n\n TensorSplitter (s_cat_idxs:list=None, s_cont_idxs:list=None,\n o_cat_idxs:list=None, o_cont_idxs:list=None,\n k_cat_idxs:list=None, k_cont_idxs:list=None,\n horizon:int=None)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n\nivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ns_cat_idxs\nlist\nNone\nlist of indices for static categorical variables\n\n\ns_cont_idxs\nlist\nNone\nlist of indices for static continuous variables\n\n\no_cat_idxs\nlist\nNone\nlist of indices for observed categorical variables\n\n\no_cont_idxs\nlist\nNone\nlist of indices for observed continuous variables\n\n\nk_cat_idxs\nlist\nNone\nlist of indices for known categorical variables\n\n\nk_cont_idxs\nlist\nNone\nlist of indices for known continuous variables\n\n\nhorizon\nint\nNone\nnumber of time steps to predict ahead\n\n\n\n\n# Example usage\nbs = 4\ns_cat_idxs = 1\ns_cont_idxs = [0, 2]\no_cat_idxs =[ 3, 4, 5]\no_cont_idxs = None\nk_cat_idxs = None\nk_cont_idxs = None\nhorizon=None\ninput_tensor = torch.randn(bs, 6, 10) # 3D input tensor\nsplitter = TensorSplitter(s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs,\n o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs)\nslices = splitter(input_tensor)\nfor i, slice_tensor in enumerate(slices):\n print(f\"Slice {i+1}: {slice_tensor.shape} {slice_tensor.dtype}\")\n\nSlice 1: torch.Size([4, 1]) torch.int64\nSlice 2: torch.Size([4, 2]) torch.int64\nSlice 3: torch.Size([4, 3, 10]) torch.float32\nSlice 4: torch.Size([4, 0, 10]) torch.float32\n\n\n\n# Example usage\nbs = 4\ns_cat_idxs = 1\ns_cont_idxs = [0, 2]\no_cat_idxs =[ 3, 4, 5]\no_cont_idxs = None\nk_cat_idxs = [6,7]\nk_cont_idxs = 8\nhorizon=3\ninput_tensor = torch.randn(4, 9, 10) # 3D input tensor\nsplitter = TensorSplitter(s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs,\n o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs,\n k_cat_idxs=k_cat_idxs, k_cont_idxs=k_cont_idxs, horizon=horizon)\nslices = splitter(input_tensor)\nfor i, slice_tensor in enumerate(slices):\n print(f\"Slice {i+1}: {slice_tensor.shape} {slice_tensor.dtype}\")\n\nSlice 1: torch.Size([4, 1]) torch.int64\nSlice 2: torch.Size([4, 2]) torch.int64\nSlice 3: torch.Size([4, 3, 7]) torch.float32\nSlice 4: torch.Size([4, 0, 7]) torch.float32\nSlice 5: torch.Size([4, 2, 10]) torch.float32\nSlice 6: torch.Size([4, 1, 10]) torch.float32\n\n\n\nsource\n\n\nEmbeddings\n\n Embeddings (n_embeddings:list, embedding_dims:list=None,\n padding_idx:int=0, embed_dropout:float=0.0, **kwargs)\n\nEmbedding layers for each categorical variable in a 2D or 3D tensor\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nn_embeddings\nlist\n\nList of num_embeddings for each categorical variable\n\n\nembedding_dims\nlist\nNone\nList of 
embedding dimensions for each categorical variable\n\n\npadding_idx\nint\n0\nEmbedding padding_idx\n\n\nembed_dropout\nfloat\n0.0\nDropout probability for Embedding layer\n\n\nkwargs\n\n\n\n\n\n\n\nt1 = torch.randint(0, 7, (16, 1))\nt2 = torch.randint(0, 5, (16, 1))\nt = torch.cat([t1, t2], 1).float()\nemb = Embeddings([7, 5], None, embed_dropout=0.1)\ntest_eq(emb(t).shape, (16, 12))\n\n\nt1 = torch.randint(0, 7, (16, 1))\nt2 = torch.randint(0, 5, (16, 1))\nt = torch.cat([t1, t2], 1).float()\nemb = Embeddings([7, 5], [4, 3])\ntest_eq(emb(t).shape, (16, 12))\n\n\nt1 = torch.randint(0, 7, (16, 1, 10))\nt2 = torch.randint(0, 5, (16, 1, 10))\nt = torch.cat([t1, t2], 1).float()\nemb = Embeddings([7, 5], None)\ntest_eq(emb(t).shape, (16, 12, 10))\n\n\nsource\n\n\nStaticBackbone\n\n StaticBackbone (c_in, c_out, seq_len, d=None, layers=[200, 100],\n dropouts=[0.1, 0.2], act=ReLU(inplace=True),\n use_bn=False, lin_first=False)\n\nStatic backbone model to embed static features\n\n# Example usage\nbs = 4\nc_in = 6\nc_out = 8\nseq_len = 10\ninput_tensor = torch.randn(bs, c_in, seq_len) # 3D input tensor\nbackbone = StaticBackbone(c_in, c_out, seq_len)\noutput_tensor = backbone(input_tensor)\nprint(f\"Input shape: {input_tensor.shape} Output shape: {output_tensor.shape}\")\nbackbone\n\nInput shape: torch.Size([4, 6, 10]) Output shape: torch.Size([4, 100])\n\n\nStaticBackbone(\n (flatten): Reshape(bs)\n (mlp): ModuleList(\n (0): LinBnDrop(\n (0): Dropout(p=0.1, inplace=False)\n (1): Linear(in_features=60, out_features=200, bias=True)\n (2): ReLU(inplace=True)\n )\n (1): LinBnDrop(\n (0): Dropout(p=0.2, inplace=False)\n (1): Linear(in_features=200, out_features=100, bias=True)\n (2): ReLU(inplace=True)\n )\n )\n)\n\n\n\n# class MultInputWrapper(nn.Module):\n# \"Model wrapper for input tensors with static and/ or observed, categorical and/ or numerical features.\"\n\n# def __init__(self,\n# arch,\n# c_in:int=None, # number of input variables\n# c_out:int=None, # number of output variables\n# seq_len:int=None, # input sequence length\n# d:tuple=None, # shape of the output tensor\n# dls:TSDataLoaders=None, # TSDataLoaders object\n# s_cat_idxs:list=None, # list of indices for static categorical variables\n# s_cat_embeddings:list=None, # list of num_embeddings for each static categorical variable\n# s_cat_embedding_dims:list=None, # list of embedding dimensions for each static categorical variable\n# s_cont_idxs:list=None, # list of indices for static continuous variables\n# o_cat_idxs:list=None, # list of indices for observed categorical variables\n# o_cat_embeddings:list=None, # list of num_embeddings for each observed categorical variable\n# o_cat_embedding_dims:list=None, # list of embedding dimensions for each observed categorical variable\n# o_cont_idxs:list=None, # list of indices for observed continuous variables. 
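# Note (illustrative): when o_cont_idxs is None, get_o_cont_idxs returns every column
# index not already assigned to s_cat_idxs, s_cont_idxs or o_cat_idxs, e.g.
# get_o_cont_idxs(7, s_cat_idxs=[3], s_cont_idxs=[1, 4, 5]) -> [0, 2, 6]
# (the same split shown in the get_feat_idxs test above).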
All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables.\n# patch_len:int=None, # Number of time steps in each patch.\n# patch_stride:int=None, # Stride of the patch.\n# flatten:bool=False, # boolean indicating whether to flatten bacbone's output tensor\n# use_bn:bool=False, # boolean indicating whether to use batch normalization in the head\n# fc_dropout:float=0., # dropout probability for the fully connected layer in the head\n# custom_head=None, # custom head to replace the default head\n# **kwargs\n# ):\n# super().__init__()\n\n# # attributes\n# c_in = c_in or dls.vars\n# c_out = c_out or dls.c\n# seq_len = seq_len or dls.len\n# d = d or (dls.d if dls is not None else None)\n# self.c_in, self.c_out, self.seq_len, self.d = c_in, c_out, seq_len, d\n\n# # tensor splitter\n# if o_cont_idxs is None:\n# o_cont_idxs = get_o_cont_idxs(c_in, s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs, o_cat_idxs=o_cat_idxs)\n# self.splitter = TensorSplitter(s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs)\n# s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs = self.splitter.s_cat_idxs, self.splitter.s_cont_idxs, self.splitter.o_cat_idxs, self.splitter.o_cont_idxs\n# assert c_in == sum([len(s_cat_idxs), len(s_cont_idxs), len(o_cat_idxs), len(o_cont_idxs)])\n\n# # embeddings\n# self.s_embeddings = Embeddings(s_cat_embeddings, s_cat_embedding_dims)\n# self.o_embeddings = Embeddings(o_cat_embeddings, o_cat_embedding_dims)\n\n# # patch encoder\n# if patch_len is not None:\n# patch_stride = patch_stride or patch_len\n# self.patch_encoder = PatchEncoder(patch_len, patch_stride, seq_len=seq_len)\n# c_mult = patch_len\n# seq_len = (seq_len + self.patch_encoder.pad_size - patch_len) // patch_stride + 1\n# else:\n# self.patch_encoder = nn.Identity()\n# c_mult = 1\n\n# # backbone\n# n_s_features = len(s_cont_idxs) + self.s_embeddings.embedding_dims\n# n_o_features = (len(o_cont_idxs) + self.o_embeddings.embedding_dims) * c_mult\n# s_backbone = StaticBackbone(c_in=n_s_features, c_out=c_out, seq_len=1, **kwargs)\n# if isinstance(arch, str):\n# arch = get_arch(arch)\n# if isinstance(arch, nn.Module):\n# o_model = arch\n# else:\n# o_model = build_ts_model(arch, c_in=n_o_features, c_out=c_out, seq_len=seq_len, d=d, **kwargs)\n# assert hasattr(o_model, \"backbone\"), \"the selected arch must have a backbone\"\n# o_backbone = getattr(o_model, \"backbone\")\n\n# # head\n# o_head_nf = output_size_calculator(o_backbone, n_o_features, seq_len)[0]\n# s_head_nf = s_backbone.head_nf\n# self.backbone = nn.ModuleList([o_backbone, s_backbone])\n# self.head_nf = o_head_nf + s_head_nf\n# if custom_head is not None:\n# if isinstance(custom_head, nn.Module): self.head = custom_head\n# else:self. 
head = custom_head(self.head_nf, c_out, seq_len, d=d)\n# else:\n# if \"rocket\" in o_model.__name__.lower():\n# self.head = rocket_nd_head(self.head_nf, c_out, seq_len=seq_len, d=d, use_bn=use_bn, fc_dropout=fc_dropout)\n# else:\n# self.head = lin_nd_head(self.head_nf, c_out, seq_len=seq_len, d=d, flatten=flatten, use_bn=use_bn, fc_dropout=fc_dropout)\n\n# def forward(self, x):\n# # split x into static cat, static cont, observed cat, and observed cont\n# s_cat, s_cont, o_cat, o_cont = self.splitter(x)\n\n# # create categorical embeddings\n# s_cat = self.s_embeddings(s_cat)\n# o_cat = self.o_embeddings(o_cat)\n\n# # contatenate static and observed features\n# s_x = torch.cat([s_cat, s_cont], 1)\n# o_x = torch.cat([o_cat, o_cont], 1)\n\n# # patch encoder\n# o_x = self.patch_encoder(o_x)\n\n# # pass static and observed features through their respective backbones\n# for i,(b,xi) in enumerate(zip(self.backbone, [o_x, s_x])):\n# if i == 0:\n# x = b(xi)\n# if x.ndim == 2:\n# x = x[..., None]\n# else:\n# x = torch.cat([x, b(xi)[..., None].repeat(1, 1, x.shape[-1])], 1)\n\n# # head\n# x = self.head(x)\n# return x\n\n\n# from tsai.models.InceptionTimePlus import InceptionTimePlus\n\n\n# c_in = 6\n# c_out = 3\n# seq_len = 97\n# d = None\n\n# s_cat_idxs=2\n# s_cont_idxs=4\n# o_cat_idxs=[0, 3]\n# o_cont_idxs=None\n# s_cat_embeddings = 5\n# s_cat_embedding_dims = None\n# o_cat_embeddings = [7, 3]\n# o_cat_embedding_dims = [3, None]\n\n# t0 = torch.randint(0, 7, (16, 1, seq_len)) # cat\n# t1 = torch.randn(16, 1, seq_len)\n# t2 = torch.randint(0, 5, (16, 1, seq_len)) # cat\n# t3 = torch.randint(0, 3, (16, 1, seq_len)) # cat\n# t4 = torch.randn(16, 1, seq_len)\n# t5 = torch.randn(16, 1, seq_len)\n\n# t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float()\n\n# patch_lens = [None, 5, 5, 5, 5]\n# patch_strides = [None, None, 1, 3, 5]\n# for patch_len, patch_stride in zip(patch_lens, patch_strides):\n# for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"MultiRocketPlus\"]:\n# print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n\n# model = MultInputWrapper(\n# arch=arch,\n# c_in=c_in,\n# c_out=c_out,\n# seq_len=seq_len,\n# d=d,\n# s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n# s_cont_idxs=s_cont_idxs,\n# o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n# o_cont_idxs=o_cont_idxs,\n# patch_len=patch_len,\n# patch_stride=patch_stride,\n# )\n\n# test_eq(model(t).shape, (16,3))\n\n\nsource\n\n\nFusionMLP\n\n FusionMLP (comb_dim, layers, act='relu', dropout=0.0, use_bn=True)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. 
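Conceptually, FusionMLP concatenates the static categorical and continuous features with the backbone's embedding and passes the result through an MLP. Below is a rough sketch of that idea, assuming 3D embeddings are reduced over the sequence dimension by mean pooling (an assumption; the reduction tsai actually applies may differ) and ignoring the optional batch norm:

import torch
import torch.nn as nn

class NaiveFusionMLP(nn.Module):
    # illustrative sketch: pool a (bs, emb_dim, seq_len) embedding over time, concatenate
    # it with the static categorical/continuous features, and run an MLP over the result
    def __init__(self, comb_dim, layers, act=nn.ReLU(), dropout=0.0):
        super().__init__()
        dims = [comb_dim] + list(layers)
        mlp = []
        for i in range(len(dims) - 1):
            mlp += [nn.Linear(dims[i], dims[i + 1]), act, nn.Dropout(dropout)]
        self.mlp = nn.Sequential(*mlp)

    def forward(self, cat, cont, emb):
        if emb.ndim == 3:
            emb = emb.mean(-1)                   # (bs, emb_dim, seq_len) -> (bs, emb_dim)
        return self.mlp(torch.cat([cat, cont, emb], dim=1))

f = NaiveFusionMLP(128 + 24 + 3, layers=[128], dropout=0.1)
out = f(torch.randn(16, 24), torch.randn(16, 3), torch.randn(16, 128, 20))
print(out.shape)                                 # torch.Size([16, 128])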
:vartype training: bool\n\nbs = 16\nemb_dim = 128\nseq_len = 20\ncat_dim = 24\ncont_feat = 3\n\ncomb_dim = emb_dim + cat_dim + cont_feat\nemb = torch.randn(bs, emb_dim, seq_len)\ncat = torch.randn(bs, cat_dim)\ncont = torch.randn(bs, cont_feat)\nfusion_mlp = FusionMLP(comb_dim, layers=comb_dim, act='relu', dropout=.1)\noutput = fusion_mlp(cat, cont, emb)\ntest_eq(output.shape, (bs, comb_dim))\n\n\nbs = 16\nemb_dim = 50000\ncat_dim = 24\ncont_feat = 3\n\ncomb_dim = emb_dim + cat_dim + cont_feat\nemb = torch.randn(bs, emb_dim)\ncat = torch.randn(bs, cat_dim)\ncont = torch.randn(bs, cont_feat)\nfusion_mlp = FusionMLP(comb_dim, layers=[128], act='relu', dropout=.1)\noutput = fusion_mlp(cat, cont, emb)\ntest_eq(output.shape, (bs, 128))\n\n\nsource\n\n\nMultInputBackboneWrapper\n\n MultInputBackboneWrapper (arch, c_in:int=None, seq_len:int=None,\n d:tuple=None,\n dls:tsai.data.core.TSDataLoaders=None,\n s_cat_idxs:list=None,\n s_cat_embeddings:list=None,\n s_cat_embedding_dims:list=None,\n s_cont_idxs:list=None, o_cat_idxs:list=None,\n o_cat_embeddings:list=None,\n o_cat_embedding_dims:list=None,\n o_cont_idxs:list=None, patch_len:int=None,\n patch_stride:int=None,\n fusion_layers:list=[128],\n fusion_act:str='relu',\n fusion_dropout:float=0.0,\n fusion_use_bn:bool=True, **kwargs)\n\nModel backbone wrapper for input tensors with static and/ or observed, categorical and/ or numerical features.\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\narch\n\n\n\n\n\nc_in\nint\nNone\nnumber of input variables\n\n\nseq_len\nint\nNone\ninput sequence length\n\n\nd\ntuple\nNone\nshape of the output tensor\n\n\ndls\nTSDataLoaders\nNone\nTSDataLoaders object\n\n\ns_cat_idxs\nlist\nNone\nlist of indices for static categorical variables\n\n\ns_cat_embeddings\nlist\nNone\nlist of num_embeddings for each static categorical variable\n\n\ns_cat_embedding_dims\nlist\nNone\nlist of embedding dimensions for each static categorical variable\n\n\ns_cont_idxs\nlist\nNone\nlist of indices for static continuous variables\n\n\no_cat_idxs\nlist\nNone\nlist of indices for observed categorical variables\n\n\no_cat_embeddings\nlist\nNone\nlist of num_embeddings for each observed categorical variable\n\n\no_cat_embedding_dims\nlist\nNone\nlist of embedding dimensions for each observed categorical variable\n\n\no_cont_idxs\nlist\nNone\nlist of indices for observed continuous variables. 
All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables.\n\n\npatch_len\nint\nNone\nNumber of time steps in each patch.\n\n\npatch_stride\nint\nNone\nStride of the patch.\n\n\nfusion_layers\nlist\n[128]\nlist of layer dimensions for the fusion MLP\n\n\nfusion_act\nstr\nrelu\nactivation function for the fusion MLP\n\n\nfusion_dropout\nfloat\n0.0\ndropout probability for the fusion MLP\n\n\nfusion_use_bn\nbool\nTrue\nboolean indicating whether to use batch normalization in the fusion MLP\n\n\nkwargs\n\n\n\n\n\n\n\nsource\n\n\nMultInputWrapper\n\n MultInputWrapper (arch, c_in:int=None, c_out:int=1, seq_len:int=None,\n d:tuple=None, dls:tsai.data.core.TSDataLoaders=None,\n s_cat_idxs:list=None, s_cat_embeddings:list=None,\n s_cat_embedding_dims:list=None, s_cont_idxs:list=None,\n o_cat_idxs:list=None, o_cat_embeddings:list=None,\n o_cat_embedding_dims:list=None, o_cont_idxs:list=None,\n patch_len:int=None, patch_stride:int=None,\n fusion_layers:list=128, fusion_act:str='relu',\n fusion_dropout:float=0.0, fusion_use_bn:bool=True,\n custom_head=None, **kwargs)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\narch\n\n\n\n\n\nc_in\nint\nNone\nnumber of input variables\n\n\nc_out\nint\n1\nnumber of output variables\n\n\nseq_len\nint\nNone\ninput sequence length\n\n\nd\ntuple\nNone\nshape of the output tensor\n\n\ndls\nTSDataLoaders\nNone\nTSDataLoaders object\n\n\ns_cat_idxs\nlist\nNone\nlist of indices for static categorical variables\n\n\ns_cat_embeddings\nlist\nNone\nlist of num_embeddings for each static categorical variable\n\n\ns_cat_embedding_dims\nlist\nNone\nlist of embedding dimensions for each static categorical variable\n\n\ns_cont_idxs\nlist\nNone\nlist of indices for static continuous variables\n\n\no_cat_idxs\nlist\nNone\nlist of indices for observed categorical variables\n\n\no_cat_embeddings\nlist\nNone\nlist of num_embeddings for each observed categorical variable\n\n\no_cat_embedding_dims\nlist\nNone\nlist of embedding dimensions for each observed categorical variable\n\n\no_cont_idxs\nlist\nNone\nlist of indices for observed continuous variables. All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables.\n\n\npatch_len\nint\nNone\nNumber of time steps in each patch.\n\n\npatch_stride\nint\nNone\nStride of the patch.\n\n\nfusion_layers\nlist\n128\nlist of layer dimensions for the fusion MLP\n\n\nfusion_act\nstr\nrelu\nactivation function for the fusion MLP\n\n\nfusion_dropout\nfloat\n0.0\ndropout probability for the fusion MLP\n\n\nfusion_use_bn\nbool\nTrue\nboolean indicating whether to use batch normalization in the fusion MLP\n\n\ncustom_head\nNoneType\nNone\ncustom head to replace the default head\n\n\nkwargs\n\n\n\n\n\n\n\nfrom tsai.models.InceptionTimePlus import InceptionTimePlus\n\n\nbs = 8\nc_in = 6\nc_out = 3\nseq_len = 97\nd = None\n\ns_cat_idxs=2\ns_cont_idxs=4\no_cat_idxs=[0, 3]\no_cont_idxs=None\ns_cat_embeddings = 5\ns_cat_embedding_dims = None\no_cat_embeddings = [7, 3]\no_cat_embedding_dims = [3, None]\n\nfusion_layers = 128\n\nt0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat\nt1 = torch.randn(bs, 1, seq_len)\nt2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat\nt3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat\nt4 = torch.randn(bs, 1, seq_len)\nt5 = torch.randn(bs, 1, seq_len)\n\nt = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())\n\npatch_lens = [None, 5, 5, 5, 5]\npatch_strides = [None, None, 1, 3, 5]\nfor patch_len, patch_stride in zip(patch_lens, patch_strides):\n for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"TSiTPlus\"]:\n print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n\n model = MultInputWrapper(\n arch=arch,\n c_in=c_in,\n c_out=c_out,\n seq_len=seq_len,\n d=d,\n s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n s_cont_idxs=s_cont_idxs,\n o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n o_cont_idxs=o_cont_idxs,\n patch_len=patch_len,\n patch_stride=patch_stride,\n fusion_layers=fusion_layers,\n ).to(default_device())\n\n test_eq(model(t).shape, (bs, c_out))\n\narch: InceptionTimePlus, patch_len: None, patch_stride: None\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None\narch: TSiTPlus, patch_len: None, patch_stride: None\narch: 
InceptionTimePlus, patch_len: 5, patch_stride: None\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None\narch: TSiTPlus, patch_len: 5, patch_stride: None\narch: InceptionTimePlus, patch_len: 5, patch_stride: 1\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1\narch: TSiTPlus, patch_len: 5, patch_stride: 1\narch: InceptionTimePlus, patch_len: 5, patch_stride: 3\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3\narch: TSiTPlus, patch_len: 5, patch_stride: 3\narch: InceptionTimePlus, patch_len: 5, patch_stride: 5\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5\narch: TSiTPlus, patch_len: 5, patch_stride: 5\n\n\n\nbs = 8\nc_in = 6\nc_out = 3\nseq_len = 97\nd = None\n\ns_cat_idxs=None\ns_cont_idxs=4\no_cat_idxs=[0, 3]\no_cont_idxs=None\ns_cat_embeddings = None\ns_cat_embedding_dims = None\no_cat_embeddings = [7, 3]\no_cat_embedding_dims = [3, None]\n\nfusion_layers = 128\n\nt0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat\nt1 = torch.randn(bs, 1, seq_len)\nt2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat\nt3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat\nt4 = torch.randn(bs, 1, seq_len)\nt5 = torch.randn(bs, 1, seq_len)\n\nt = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())\n\npatch_lens = [None, 5, 5, 5, 5]\npatch_strides = [None, None, 1, 3, 5]\nfor patch_len, patch_stride in zip(patch_lens, patch_strides):\n for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"TSiTPlus\"]:\n print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n\n model = MultInputWrapper(\n arch=arch,\n c_in=c_in,\n c_out=c_out,\n seq_len=seq_len,\n d=d,\n s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n s_cont_idxs=s_cont_idxs,\n o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n o_cont_idxs=o_cont_idxs,\n patch_len=patch_len,\n patch_stride=patch_stride,\n fusion_layers=fusion_layers,\n ).to(default_device())\n\n test_eq(model(t).shape, (bs, c_out))\n\narch: InceptionTimePlus, patch_len: None, patch_stride: None\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None\narch: TSiTPlus, patch_len: None, patch_stride: None\narch: InceptionTimePlus, patch_len: 5, patch_stride: None\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None\narch: TSiTPlus, patch_len: 5, patch_stride: None\narch: InceptionTimePlus, patch_len: 5, patch_stride: 1\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1\narch: TSiTPlus, patch_len: 5, patch_stride: 1\narch: InceptionTimePlus, patch_len: 5, patch_stride: 3\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3\narch: TSiTPlus, patch_len: 5, patch_stride: 3\narch: InceptionTimePlus, patch_len: 5, patch_stride: 5\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5\narch: TSiTPlus, patch_len: 5, patch_stride: 5\n\n\n\nbs = 8\nc_in = 6\nc_out = 3\nseq_len = 97\nd = None\n\ns_cat_idxs=2\ns_cont_idxs=4\no_cat_idxs=None\no_cont_idxs=None\ns_cat_embeddings = 5\ns_cat_embedding_dims = None\no_cat_embeddings = None\no_cat_embedding_dims = None\n\nfusion_layers = 128\n\nt0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat\nt1 = torch.randn(bs, 1, 
seq_len)\nt2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat\nt3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat\nt4 = torch.randn(bs, 1, seq_len)\nt5 = torch.randn(bs, 1, seq_len)\n\nt = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())\n\npatch_lens = [None, 5, 5, 5, 5]\npatch_strides = [None, None, 1, 3, 5]\nfor patch_len, patch_stride in zip(patch_lens, patch_strides):\n for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"TSiTPlus\"]:\n print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n\n model = MultInputWrapper(\n arch=arch,\n c_in=c_in,\n c_out=c_out,\n seq_len=seq_len,\n d=d,\n s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n s_cont_idxs=s_cont_idxs,\n o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n o_cont_idxs=o_cont_idxs,\n patch_len=patch_len,\n patch_stride=patch_stride,\n fusion_layers=fusion_layers,\n ).to(default_device())\n\n test_eq(model(t).shape, (bs, c_out))\n\narch: InceptionTimePlus, patch_len: None, patch_stride: None\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None\narch: TSiTPlus, patch_len: None, patch_stride: None\narch: InceptionTimePlus, patch_len: 5, patch_stride: None\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None\narch: TSiTPlus, patch_len: 5, patch_stride: None\narch: InceptionTimePlus, patch_len: 5, patch_stride: 1\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1\narch: TSiTPlus, patch_len: 5, patch_stride: 1\narch: InceptionTimePlus, patch_len: 5, patch_stride: 3\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3\narch: TSiTPlus, patch_len: 5, patch_stride: 3\narch: InceptionTimePlus, patch_len: 5, patch_stride: 5\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5\narch: TSiTPlus, patch_len: 5, patch_stride: 5\n\n\n\nbs = 8\nc_in = 6\nc_out = 3\nseq_len = 97\nd = None\n\ns_cat_idxs=None\ns_cont_idxs=None\no_cat_idxs=None\no_cont_idxs=None\ns_cat_embeddings = None\ns_cat_embedding_dims = None\no_cat_embeddings = None\no_cat_embedding_dims = None\n\nfusion_layers = 128\n\nt0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat\nt1 = torch.randn(bs, 1, seq_len)\nt2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat\nt3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat\nt4 = torch.randn(bs, 1, seq_len)\nt5 = torch.randn(bs, 1, seq_len)\n\nt = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())\n\npatch_lens = [None, 5, 5, 5, 5]\npatch_strides = [None, None, 1, 3, 5]\nfor patch_len, patch_stride in zip(patch_lens, patch_strides):\n for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"TSiTPlus\"]:\n print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n\n model = MultInputWrapper(\n arch=arch,\n c_in=c_in,\n c_out=c_out,\n seq_len=seq_len,\n d=d,\n s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n s_cont_idxs=s_cont_idxs,\n o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n o_cont_idxs=o_cont_idxs,\n patch_len=patch_len,\n patch_stride=patch_stride,\n fusion_layers=fusion_layers,\n ).to(default_device())\n\n test_eq(model(t).shape, (bs, c_out))\n\narch: InceptionTimePlus, patch_len: None, patch_stride: None\narch: <class 
'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None\narch: TSiTPlus, patch_len: None, patch_stride: None\narch: InceptionTimePlus, patch_len: 5, patch_stride: None\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None\narch: TSiTPlus, patch_len: 5, patch_stride: None\narch: InceptionTimePlus, patch_len: 5, patch_stride: 1\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1\narch: TSiTPlus, patch_len: 5, patch_stride: 1\narch: InceptionTimePlus, patch_len: 5, patch_stride: 3\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3\narch: TSiTPlus, patch_len: 5, patch_stride: 3\narch: InceptionTimePlus, patch_len: 5, patch_stride: 5\narch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5\narch: TSiTPlus, patch_len: 5, patch_stride: 5", + "crumbs": [ + "Models", + "Miscellaneous", + "Multimodal" + ] + }, + { + "objectID": "models.inceptiontime.html", + "href": "models.inceptiontime.html", + "title": "InceptionTime", + "section": "", + "text": "An ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture\n\nThis is an unofficial PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:\nFawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J. & Petitjean, F. (2019). InceptionTime: Finding AlexNet for Time Series Classification. arXiv preprint arXiv:1909.04939.\nOfficial InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime\n\nsource\n\nInceptionTime\n\n InceptionTime (c_in, c_out, seq_len=None, nf=32, nb_filters=None, ks=40,\n bottleneck=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nInceptionBlock\n\n InceptionBlock (ni, nf=32, residual=True, depth=6, ks=40,\n bottleneck=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nInceptionModule\n\n InceptionModule (ni, nf, ks=40, bottleneck=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nfrom tsai.models.utils import count_parameters\n\n\nbs = 16\nvars = 1\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, vars, seq_len)\ntest_eq(InceptionTime(vars,c_out)(xb).shape, [bs, c_out])\ntest_eq(InceptionTime(vars,c_out, bottleneck=False)(xb).shape, [bs, c_out])\ntest_eq(InceptionTime(vars,c_out, residual=False)(xb).shape, [bs, c_out])\ntest_eq(count_parameters(InceptionTime(3, 2)), 455490)\n\n\nInceptionTime(3,2)\n\nInceptionTime(\n (inceptionblock): InceptionBlock(\n (inception): ModuleList(\n (0): InceptionModule(\n (bottleneck): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)\n (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)\n (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (1): InceptionModule(\n (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): Conv1d(32, 32, kernel_size=(39,), 
stride=(1,), padding=(19,), bias=False)\n (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)\n (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (2): InceptionModule(\n (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)\n (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)\n (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (3): InceptionModule(\n (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)\n (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)\n (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (4): InceptionModule(\n (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)\n (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)\n (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n (5): InceptionModule(\n (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)\n (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)\n (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (act): ReLU()\n )\n )\n (shortcut): ModuleList(\n (0): ConvBlock(\n (0): Conv1d(3, 128, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (add): 
Add\n (act): ReLU()\n )\n (gap): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Flatten(full=False)\n )\n (fc): Linear(in_features=128, out_features=2, bias=True)\n)", + "crumbs": [ + "Models", + "CNNs", + "InceptionTime" + ] + }, + { + "objectID": "models.patchtst.html", + "href": "models.patchtst.html", + "title": "PatchTST", + "section": "", + "text": "This is an unofficial PyTorch implementation of PatchTST created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on:\nIn this notebook, we are going to use a new state-of-the-art model called PatchTST (Nie et al, 2022) to create a long-term time series forecast.\nHere are some paper details:\n\nNie, Y., Nguyen, N. H., Sinthong, P., & Kalagnanam, J. (2022). A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. arXiv preprint arXiv:2211.14730.\nOfficial implementation:: https://github.com/yuqinie98/PatchTST\n\n@article{Yuqietal-2022-PatchTST,\n title={A Time Series is Worth 64 Words: Long-term Forecasting with Transformers},\n author={Yuqi Nie and \n Nam H. Nguyen and \n Phanwadee Sinthong and \n Jayant Kalagnanam},\n journal={arXiv preprint arXiv:2211.14730},\n year={2022}\n}\nPatchTST has shown some impressive results across some of the most widely used long-term datasets for benchmarking:\n\n\n\nimage.png\n\n\n\nsource\n\nSeriesDecomposition\n\n SeriesDecomposition (kernel_size:int)\n\nSeries decomposition block\n\n\n\n\nType\nDetails\n\n\n\n\nkernel_size\nint\nthe size of the window\n\n\n\n\nsource\n\n\nMovingAverage\n\n MovingAverage (kernel_size:int)\n\nMoving average block to highlight the trend of time series\n\n\n\n\nType\nDetails\n\n\n\n\nkernel_size\nint\nthe size of the window\n\n\n\n\nsource\n\n\nFlatten_Head\n\n Flatten_Head (individual, n_vars, nf, pred_dim)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nPatchTST\n\n PatchTST (c_in, c_out, seq_len, pred_dim=None, n_layers=2, n_heads=8,\n d_model=512, d_ff=2048, dropout=0.05, attn_dropout=0.0,\n patch_len=16, stride=8, padding_patch=True, revin=True,\n affine=False, individual=False, subtract_last=False,\n decomposition=False, kernel_size=25, activation='gelu',\n norm='BatchNorm', pre_norm=False, res_attention=True,\n store_attn=False)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. 
You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n\nivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\nnumber of input channels\n\n\nc_out\n\n\nused for compatibility\n\n\nseq_len\n\n\ninput sequence length\n\n\npred_dim\nNoneType\nNone\nprediction sequence length\n\n\nn_layers\nint\n2\nnumber of encoder layers\n\n\nn_heads\nint\n8\nnumber of heads\n\n\nd_model\nint\n512\ndimension of model\n\n\nd_ff\nint\n2048\ndimension of fully connected network (fcn)\n\n\ndropout\nfloat\n0.05\ndropout applied to all linear layers in the encoder\n\n\nattn_dropout\nfloat\n0.0\ndropout applied to the attention scores\n\n\npatch_len\nint\n16\npatch_len\n\n\nstride\nint\n8\nstride\n\n\npadding_patch\nbool\nTrue\nflag to indicate if padded is added if necessary\n\n\nrevin\nbool\nTrue\nRevIN\n\n\naffine\nbool\nFalse\nRevIN affine\n\n\nindividual\nbool\nFalse\nindividual head\n\n\nsubtract_last\nbool\nFalse\nsubtract_last\n\n\ndecomposition\nbool\nFalse\napply decomposition\n\n\nkernel_size\nint\n25\ndecomposition kernel size\n\n\nactivation\nstr\ngelu\nactivation function of intermediate layer, relu or gelu.\n\n\nnorm\nstr\nBatchNorm\ntype of normalization layer used in the encoder\n\n\npre_norm\nbool\nFalse\nflag to indicate if normalization is applied as the first step in the sublayers\n\n\nres_attention\nbool\nTrue\nflag to indicate if Residual MultiheadAttention should be used\n\n\nstore_attn\nbool\nFalse\ncan be used to visualize attention weights\n\n\n\n\nfrom fastcore.test import test_eq\nfrom tsai.models.utils import count_parameters\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 1\nseq_len = 60\npred_dim = 20\n\nxb = torch.randn(bs, c_in, seq_len)\n\narch_config=dict(\n n_layers=3, # number of encoder layers\n n_heads=16, # number of heads\n d_model=128, # dimension of model\n d_ff=256, # dimension of fully connected network (fcn)\n attn_dropout=0.,\n dropout=0.2, # dropout applied to all linear layers in the encoder\n patch_len=16, # patch_len\n stride=8, # stride\n )\n\nmodel = PatchTST(c_in, c_out, seq_len, pred_dim, **arch_config)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_in, pred_dim])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 418470\n\n\n\n\nTest conversion to Torchscript\n\nimport gc\nimport os\nimport torch\nimport torch.nn as nn\nfrom fastcore.test import test_eq, test_close\n\n\nbs = 1\nnew_bs = 8\nc_in = 3\nc_out = 1\nseq_len = 96\npred_dim = 20\n\n# module\nmodel = PatchTST(c_in, c_out, seq_len, pred_dim)\nmodel = model.eval()\n\n# input data\ninp = torch.rand(bs, c_in, seq_len)\nnew_inp = torch.rand(new_bs, c_in, seq_len)\n\n# original\ntry:\n output = model(inp)\n new_output = model(new_inp)\n print(f'{\"original\":10}: ok')\nexcept:\n print(f'{\"original\":10}: failed')\n\n# tracing\ntry:\n traced_model = torch.jit.trace(model, inp)\n file_path = 
f\"_test_traced_model.pt\"\n torch.jit.save(traced_model, file_path)\n traced_model = torch.jit.load(file_path)\n test_eq(output, traced_model(inp))\n test_eq(new_output, traced_model(new_inp))\n os.remove(file_path)\n del traced_model\n gc.collect()\n print(f'{\"tracing\":10}: ok')\nexcept:\n print(f'{\"tracing\":10}: failed')\n\n# scripting\ntry:\n scripted_model = torch.jit.script(model)\n file_path = f\"_test_scripted_model.pt\"\n torch.jit.save(scripted_model, file_path)\n scripted_model = torch.jit.load(file_path)\n test_eq(output, scripted_model(inp))\n test_eq(new_output, scripted_model(new_inp))\n os.remove(file_path)\n del scripted_model\n gc.collect()\n print(f'{\"scripting\":10}: ok')\nexcept:\n print(f'{\"scripting\":10}: failed')\n\noriginal : ok\ntracing : ok\nscripting : failed\n\n\n\n\nTest conversion to onnx\n\ntry:\n import onnx\n import onnxruntime as ort\n \n try:\n file_path = \"_model_cpu.onnx\"\n torch.onnx.export(model.cpu(), # model being run\n inp, # model input (or a tuple for multiple inputs)\n file_path, # where to save the model (can be a file or file-like object)\n input_names = ['input'], # the model's input names\n output_names = ['output'], # the model's output names\n dynamic_axes={\n 'input' : {0 : 'batch_size'}, \n 'output' : {0 : 'batch_size'}} # variable length axes\n )\n\n\n # Load the model and check it's ok\n onnx_model = onnx.load(file_path)\n onnx.checker.check_model(onnx_model)\n del onnx_model\n gc.collect()\n\n # New session\n ort_sess = ort.InferenceSession(file_path)\n output_onnx = ort_sess.run(None, {'input': inp.numpy()})[0]\n test_close(output.detach().numpy(), output_onnx)\n new_output_onnx = ort_sess.run(None, {'input': new_inp.numpy()})[0]\n test_close(new_output.detach().numpy(), new_output_onnx)\n os.remove(file_path)\n print(f'{\"onnx\":10}: ok')\n except:\n print(f'{\"onnx\":10}: failed')\n\nexcept ImportError:\n print('onnx and onnxruntime are not installed. Please install them to run this test')\n\nonnx and onnxruntime are not installed. 
Please install them to run this test", + "crumbs": [ + "Models", + "Transformers", + "PatchTST" + ] + }, + { + "objectID": "models.rescnn.html", + "href": "models.rescnn.html", + "title": "ResCNN", + "section": "", + "text": "This is an unofficial PyTorch implementation by Ignacio Oguiza - oguiza@timeseriesAI.co\n\n\nfrom tsai.models.utils import *\n\n\nsource\n\nResCNN\n\n ResCNN (c_in, c_out, coord=False, separable=False, zero_norm=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nxb = torch.rand(16, 3, 10)\ntest_eq(ResCNN(3,2,coord=True, separable=True)(xb).shape, [xb.shape[0], 2])\ntest_eq(count_parameters(ResCNN(3,2)), 257283)\n\n\nResCNN(3,2,coord=True, separable=True)\n\nResCNN(\n (block1): _ResCNNBlock(\n (convblock1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(4, 4, kernel_size=(7,), stride=(1,), padding=(3,), groups=4, bias=False)\n (pointwise_conv): Conv1d(4, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (convblock2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(5,), stride=(1,), padding=(2,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (convblock3): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(3,), stride=(1,), padding=(1,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (shortcut): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(4, 64, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (add): Add\n (act): ReLU()\n )\n (block2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(3,), stride=(1,), padding=(1,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.2)\n )\n (block3): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(3,), stride=(1,), padding=(1,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 256, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): PReLU(num_parameters=1)\n )\n (block4): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(257, 257, kernel_size=(3,), stride=(1,), padding=(1,), groups=257, bias=False)\n (pointwise_conv): Conv1d(257, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ELU(alpha=0.3)\n )\n (gap): AdaptiveAvgPool1d(output_size=1)\n (squeeze): Squeeze(dim=-1)\n (lin): Linear(in_features=128, out_features=2, bias=True)\n)\n\n\n\ncheck_weight(ResCNN(3,2, zero_norm=True), is_bn)\n\n(array([1., 1., 0., 1., 1., 1., 1.], dtype=float32),\n array([0., 0., 0., 0., 0., 0., 0.], dtype=float32))", + "crumbs": [ + "Models", + "CNNs", + "ResCNN" + ] + }, + { 
+ "objectID": "models.rnnattention.html", + "href": "models.rnnattention.html", + "title": "RNNAttention", + "section": "", + "text": "This is an custom PyTorch implementation by @yangtzech, based on TST implementation of Ignacio Oguiza.", + "crumbs": [ + "Models", + "RNNs", + "RNNAttention" + ] + }, + { + "objectID": "models.rnnattention.html#arguments", + "href": "models.rnnattention.html#arguments", + "title": "RNNAttention", + "section": "Arguments", + "text": "Arguments\nUsual values are the ones that appear in the “Attention is all you need” and “A Transformer-based Framework for Multivariate Time Series Representation Learning” papers. And some parameters are necessary for the RNN part.\nThe default values are the ones selected as a default configuration in the latter.\n\nc_in: the number of features (aka variables, dimensions, channels) in the time series dataset. dls.var\nc_out: the number of target classes. dls.c\nseq_len: number of time steps in the time series. dls.len\nhidden_size: the number of features in the hidden state in the RNN model. Default: 128.\nrnn_layers: the number of recurrent layers of the RNN model. Default: 1.\nbias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True\nrnn_dropout: If non-zero, introduces a Dropout layer on the outputs of each RNN layer except the last layer, with dropout probability equal to :attr:rnn_dropout. Default: 0\nbidirectional: If True, becomes a bidirectional RNN. Default: False\nn_heads: parallel attention heads. Usual values: 8-16. Default: 16.\nd_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.\nd_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.\nd_ff: the dimension of the feedforward network model. Usual values: 256-4096. Default: 256.\nencoder_dropout: amount of residual dropout applied in the encoder. Usual values: 0.-0.3. Default: 0.1.\nact: the activation function of intermediate layer, relu or gelu. Default: ‘gelu’.\nencoder_layers: the number of sub-encoder-layers in the encoder. Usual values: 2-8. Default: 3.\nfc_dropout: dropout applied to the final fully connected layer. Usual values: 0.-0.8. Default: 0.\ny_range: range of possible y values (used in regression tasks). Default: None\nkwargs: nn.Conv1d kwargs. 
If not {}, a nn.Conv1d with those kwargs will be applied to original time series.", + "crumbs": [ + "Models", + "RNNs", + "RNNAttention" + ] + }, + { + "objectID": "models.rnnattention.html#imports", + "href": "models.rnnattention.html#imports", + "title": "RNNAttention", + "section": "Imports", + "text": "Imports", + "crumbs": [ + "Models", + "RNNs", + "RNNAttention" + ] + }, + { + "objectID": "models.rnnattention.html#rnnattention", + "href": "models.rnnattention.html#rnnattention", + "title": "RNNAttention", + "section": "RNNAttention", + "text": "RNNAttention\n\nt = torch.rand(16, 50, 128)\noutput, attn = _MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t)\noutput.shape, attn.shape\n\n(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))\n\n\n\nt = torch.rand(16, 50, 128)\noutput = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=3, d_k=None, d_v=None, d_ff=512, dropout=0.1, activation='gelu')(t)\noutput.shape\n\ntorch.Size([16, 50, 128])\n\n\n\nsource\n\nGRUAttention\n\n GRUAttention (c_in:int, c_out:int, seq_len:int, hidden_size=128,\n rnn_layers=1, bias=True, rnn_dropout=0,\n bidirectional=False, encoder_layers:int=3, n_heads:int=16,\n d_k:Optional[int]=None, d_v:Optional[int]=None,\n d_ff:int=256, encoder_dropout:float=0.1, act:str='gelu',\n fc_dropout:float=0.0, y_range:Optional[tuple]=None,\n verbose:bool=False, custom_head=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nLSTMAttention\n\n LSTMAttention (c_in:int, c_out:int, seq_len:int, hidden_size=128,\n rnn_layers=1, bias=True, rnn_dropout=0,\n bidirectional=False, encoder_layers:int=3, n_heads:int=16,\n d_k:Optional[int]=None, d_v:Optional[int]=None,\n d_ff:int=256, encoder_dropout:float=0.1, act:str='gelu',\n fc_dropout:float=0.0, y_range:Optional[tuple]=None,\n verbose:bool=False, custom_head=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nRNNAttention\n\n RNNAttention (c_in:int, c_out:int, seq_len:int, hidden_size=128,\n rnn_layers=1, bias=True, rnn_dropout=0,\n bidirectional=False, encoder_layers:int=3, n_heads:int=16,\n d_k:Optional[int]=None, d_v:Optional[int]=None,\n d_ff:int=256, encoder_dropout:float=0.1, act:str='gelu',\n fc_dropout:float=0.0, y_range:Optional[tuple]=None,\n verbose:bool=False, custom_head=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 500\n\nxb = torch.randn(bs, c_in, seq_len)\n\n# standardize by channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\n# Settings\nhidden_size = 128\nrnn_layers=1\nbias=True\nrnn_dropout=0\nbidirectional=False\nencoder_layers=3\nn_heads = 16\nd_k = d_v = None # if None --> d_model // n_heads\nd_ff = 256\nencoder_dropout = 0.1\nact = \"gelu\"\nfc_dropout = 0.1\nkwargs = {}\n\nmodel = RNNAttention(c_in, c_out, seq_len, hidden_size=hidden_size, rnn_layers=rnn_layers, bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional,\n encoder_layers=encoder_layers, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, encoder_dropout=encoder_dropout, act=act, \n fc_dropout=fc_dropout, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 541698\n\n\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 60\n\nxb = torch.randn(bs, c_in, seq_len)\n\n# standardize by 
channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\n# Settings\nhidden_size = 128\nrnn_layers=1\nbias=True\nrnn_dropout=0\nbidirectional=False\nencoder_layers=3\nn_heads = 16\nd_k = d_v = None # if None --> d_model // n_heads\nd_ff = 256\nencoder_dropout = 0.1\nact = \"gelu\"\nfc_dropout = 0.1\nkwargs = {}\n# kwargs = dict(kernel_size=5, padding=2)\n\nmodel = RNNAttention(c_in, c_out, seq_len, hidden_size=hidden_size, rnn_layers=rnn_layers, bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional,\n encoder_layers=encoder_layers, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, encoder_dropout=encoder_dropout, act=act, \n fc_dropout=fc_dropout, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 429058", + "crumbs": [ + "Models", + "RNNs", + "RNNAttention" + ] + }, + { + "objectID": "models.layers.html", + "href": "models.layers.html", + "title": "Layers", + "section": "", + "text": "Helper functions used to build PyTorch timeseries models.\n\n\nsource\n\ntest_module_to_torchscript\n\n test_module_to_torchscript (m:torch.nn.modules.module.Module,\n inputs:torch.Tensor, trace:bool=True,\n script:bool=True, serialize:bool=True,\n verbose:bool=True)\n\nTests if a PyTorch module can be correctly traced or scripted and serialized\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nm\nModule\n\nThe PyTorch module to be tested.\n\n\ninputs\nTensor\n\nA tensor or tuple of tensors representing the inputs to the model.\n\n\ntrace\nbool\nTrue\nIf True, attempts to trace the model. Defaults to True.\n\n\nscript\nbool\nTrue\nIf True, attempts to script the model. Defaults to True.\n\n\nserialize\nbool\nTrue\nIf True, saves and loads the traced/scripted module to ensure it can be serialized. Defaults to True.\n\n\nverbose\nbool\nTrue\nIf True, prints detailed information about the tracing and scripting process. Defaults to True.\n\n\n\n\nm = nn.Linear(10, 2)\ninp = torch.randn(3, 10)\ntest_module_to_torchscript(m, inp, trace=True, script=True, serialize=True, verbose=True)\n\noutput.shape: torch.Size([3, 2])\nTracing...\n...Linear has been successfully traced 😃\n\n\n\nTrue\n\n\n\nsource\n\n\ninit_lin_zero\n\n init_lin_zero (m)\n\n\nsource\n\n\nSwishBeta\n\n SwishBeta ()\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nSmeLU\n\n SmeLU (beta:float=2.0)\n\nSmooth ReLU activation function based on https://arxiv.org/pdf/2202.06499.pdf\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nbeta\nfloat\n2.0\nBeta value\n\n\nReturns\nNone\n\n\n\n\n\n\nsource\n\n\nChomp1d\n\n Chomp1d (chomp_size)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. 
note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nSameConv1d\n\n SameConv1d (ni, nf, ks=3, stride=1, dilation=1, **kwargs)\n\nConv1d with padding=‘same’\n\nsource\n\n\nPad1d\n\n Pad1d (padding, value=0.0)\n\nPads the input tensor boundaries with a constant value.\nFor N-dimensional padding, use :func:torch.nn.functional.pad().\nArgs: padding (int, tuple): the size of the padding. If is int, uses the same padding in both boundaries. If a 2-tuple, uses (:math:\\text{padding\\_left}, :math:\\text{padding\\_right})\nShape: - Input: :math:(C, W_{in}) or :math:(N, C, W_{in}). - Output: :math:(C, W_{out}) or :math:(N, C, W_{out}), where\n :math:`W_{out} = W_{in} + \\text{padding\\_left} + \\text{padding\\_right}`\nExamples::\n>>> # xdoctest: +IGNORE_WANT(\"non-deterministic\")\n>>> m = nn.ConstantPad1d(2, 3.5)\n>>> input = torch.randn(1, 2, 4)\n>>> input\ntensor([[[-1.0491, -0.7152, -0.0749, 0.8530],\n [-1.3287, 1.8966, 0.1466, -0.2771]]])\n>>> m(input)\ntensor([[[ 3.5000, 3.5000, -1.0491, -0.7152, -0.0749, 0.8530, 3.5000,\n 3.5000],\n [ 3.5000, 3.5000, -1.3287, 1.8966, 0.1466, -0.2771, 3.5000,\n 3.5000]]])\n>>> m = nn.ConstantPad1d(2, 3.5)\n>>> input = torch.randn(1, 2, 3)\n>>> input\ntensor([[[ 1.6616, 1.4523, -1.1255],\n [-3.6372, 0.1182, -1.8652]]])\n>>> m(input)\ntensor([[[ 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000, 3.5000],\n [ 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000, 3.5000]]])\n>>> # using different paddings for different sides\n>>> m = nn.ConstantPad1d((3, 1), 3.5)\n>>> m(input)\ntensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000],\n [ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]])\n\nsource\n\n\nsame_padding1d\n\n same_padding1d (seq_len, ks, stride=1, dilation=1)\n\nSame padding formula as used in Tensorflow\n\nsource\n\n\nConv2d\n\n Conv2d (ni, nf, kernel_size=None, ks=None, stride=1, padding='same',\n dilation=1, init='auto', bias_std=0.01, **kwargs)\n\nconv1d layer with padding=‘same’, ‘valid’, or any integer (defaults to ‘same’)\n\nsource\n\n\nConv2dSame\n\n Conv2dSame (ni, nf, ks=(3, 3), stride=(1, 1), dilation=(1, 1), **kwargs)\n\nConv2d with padding=‘same’\n\nsource\n\n\nPad2d\n\n Pad2d (padding, value=0.0)\n\nPads the input tensor boundaries with a constant value.\nFor N-dimensional padding, use :func:torch.nn.functional.pad().\nArgs: padding (int, tuple): the size of the padding. If is int, uses the same padding in all boundaries. If a 4-tuple, uses (:math:\\text{padding\\_left}, :math:\\text{padding\\_right}, :math:\\text{padding\\_top}, :math:\\text{padding\\_bottom})\nShape: - Input: :math:(N, C, H_{in}, W_{in}) or :math:(C, H_{in}, W_{in}). 
- Output: :math:(N, C, H_{out}, W_{out}) or :math:(C, H_{out}, W_{out}), where\n :math:`H_{out} = H_{in} + \\text{padding\\_top} + \\text{padding\\_bottom}`\n\n :math:`W_{out} = W_{in} + \\text{padding\\_left} + \\text{padding\\_right}`\nExamples::\n>>> # xdoctest: +IGNORE_WANT(\"non-deterministic\")\n>>> m = nn.ConstantPad2d(2, 3.5)\n>>> input = torch.randn(1, 2, 2)\n>>> input\ntensor([[[ 1.6585, 0.4320],\n [-0.8701, -0.4649]]])\n>>> m(input)\ntensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],\n [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],\n [ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000],\n [ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000],\n [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],\n [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])\n>>> # using different paddings for different sides\n>>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)\n>>> m(input)\ntensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],\n [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],\n [ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320],\n [ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649],\n [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])\n\nsource\n\n\nsame_padding2d\n\n same_padding2d (H, W, ks, stride=(1, 1), dilation=(1, 1))\n\nSame padding formula as used in Tensorflow\n\nbs = 2\nc_in = 3\nc_out = 5\nh = 16\nw = 20\nt = torch.rand(bs, c_in, h, w)\ntest_eq(Conv2dSame(c_in, c_out, ks=3, stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))\ntest_eq(Conv2dSame(c_in, c_out, ks=(3, 1), stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))\ntest_eq(Conv2dSame(c_in, c_out, ks=3, stride=(1, 1), dilation=(2, 2), bias=False)(t).shape, (bs, c_out, h, w))\ntest_eq(Conv2dSame(c_in, c_out, ks=3, stride=(2, 2), dilation=(1, 1), bias=False)(t).shape, (bs, c_out, h//2, w//2))\ntest_eq(Conv2dSame(c_in, c_out, ks=3, stride=(2, 2), dilation=(2, 2), bias=False)(t).shape, (bs, c_out, h//2, w//2))\ntest_eq(Conv2d(c_in, c_out, ks=3, padding='same', stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))\n\n\nsource\n\n\nCausalConv1d\n\n CausalConv1d (ni, nf, ks, stride=1, dilation=1, groups=1, bias=True)\n\nApplies a 1D convolution over an input signal composed of several input planes.\nIn the simplest case, the output value of the layer with input size :math:(N, C_{\\text{in}}, L) and output :math:(N, C_{\\text{out}}, L_{\\text{out}}) can be precisely described as:\n.. math:: (N_i, C_{j}) = (C{j}) + {k = 0}^{C_{in} - 1} (C_{_j}, k) (N_i, k)\nwhere :math:\\star is the valid cross-correlation_ operator, :math:N is a batch size, :math:C denotes a number of channels, :math:L is a length of signal sequence.\nThis module supports :ref:TensorFloat32<tf32_on_ampere>.\nOn certain ROCm devices, when using float16 inputs this module will use :ref:different precision<fp16_on_mi200> for backward.\n\n:attr:stride controls the stride for the cross-correlation, a single number or a one-element tuple.\n:attr:padding controls the amount of padding applied to the input. It can be either a string {‘valid’, ‘same’} or a tuple of ints giving the amount of implicit padding applied on both sides.\n:attr:dilation controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this link_ has a nice visualization of what :attr:dilation does.\n:attr:groups controls the connections between inputs and outputs. :attr:in_channels and :attr:out_channels must both be divisible by :attr:groups. 
For example,\n\nAt groups=1, all inputs are convolved to all outputs.\nAt groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels and producing half the output channels, and both subsequently concatenated.\nAt groups= :attr:in_channels, each input channel is convolved with its own set of filters (of size :math:\\frac{\\text{out\\_channels}}{\\text{in\\_channels}}).\n\n\nNote: When groups == in_channels and out_channels == K * in_channels, where K is a positive integer, this operation is also known as a “depthwise convolution”.\nIn other words, for an input of size :math:`(N, C_{in}, L_{in})`,\na depthwise convolution with a depthwise multiplier `K` can be performed with the arguments\n:math:`(C_\\text{in}=C_\\text{in}, C_\\text{out}=C_\\text{in} \\times \\text{K}, ..., \\text{groups}=C_\\text{in})`.\nNote: In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. See :doc:/notes/randomness for more information.\nNote: padding='valid' is the same as no padding. padding='same' pads the input so the output has the shape as the input. However, this mode doesn’t support any stride values other than 1.\nNote: This module supports complex data types i.e. complex32, complex64, complex128.\nArgs: in_channels (int): Number of channels in the input image out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the convolving kernel stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int, tuple or str, optional): Padding added to both sides of the input. Default: 0 padding_mode (str, optional): 'zeros', 'reflect', 'replicate' or 'circular'. Default: 'zeros' dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 bias (bool, optional): If True, adds a learnable bias to the output. Default: True\nShape: - Input: :math:(N, C_{in}, L_{in}) or :math:(C_{in}, L_{in}) - Output: :math:(N, C_{out}, L_{out}) or :math:(C_{out}, L_{out}), where\n .. math::\n L_{out} = \\left\\lfloor\\frac{L_{in} + 2 \\times \\text{padding} - \\text{dilation}\n \\times (\\text{kernel\\_size} - 1) - 1}{\\text{stride}} + 1\\right\\rfloor\nAttributes: weight (Tensor): the learnable weights of the module of shape :math:(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}}, \\text{kernel\\_size}). The values of these weights are sampled from :math:\\mathcal{U}(-\\sqrt{k}, \\sqrt{k}) where :math:k = \\frac{groups}{C_\\text{in} * \\text{kernel\\_size}} bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:bias is True, then the values of these weights are sampled from :math:\\mathcal{U}(-\\sqrt{k}, \\sqrt{k}) where :math:k = \\frac{groups}{C_\\text{in} * \\text{kernel\\_size}}\nExamples::\n>>> m = nn.Conv1d(16, 33, 3, stride=2)\n>>> input = torch.randn(20, 16, 50)\n>>> output = m(input)\n.. _cross-correlation: https://en.wikipedia.org/wiki/Cross-correlation\n.. 
_link: https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md\n\nsource\n\n\nConv1d\n\n Conv1d (ni, nf, kernel_size=None, ks=None, stride=1, padding='same',\n dilation=1, init='auto', bias_std=0.01, **kwargs)\n\nconv1d layer with padding=‘same’, ‘causal’, ‘valid’, or any integer (defaults to ‘same’)\n\nbs = 2\nc_in = 3\nc_out = 5\nseq_len = 512\nt = torch.rand(bs, c_in, seq_len)\ndilation = 1\ntest_eq(CausalConv1d(c_in, c_out, ks=3, dilation=dilation)(t).shape, Conv1d(c_in, c_out, ks=3, padding=\"same\", dilation=dilation)(t).shape)\ndilation = 2\ntest_eq(CausalConv1d(c_in, c_out, ks=3, dilation=dilation)(t).shape, Conv1d(c_in, c_out, ks=3, padding=\"same\", dilation=dilation)(t).shape)\n\n\nbs = 2\nni = 3\nnf = 5\nseq_len = 6\nks = 3\nt = torch.rand(bs, c_in, seq_len)\ntest_eq(Conv1d(ni, nf, ks, padding=0)(t).shape, (bs, c_out, seq_len - (2 * (ks//2))))\ntest_eq(Conv1d(ni, nf, ks, padding='valid')(t).shape, (bs, c_out, seq_len - (2 * (ks//2))))\ntest_eq(Conv1d(ni, nf, ks, padding='same')(t).shape, (bs, c_out, seq_len))\ntest_eq(Conv1d(ni, nf, ks, padding='causal')(t).shape, (bs, c_out, seq_len))\ntest_error('use kernel_size or ks but not both simultaneously', Conv1d, ni, nf, kernel_size=3, ks=3)\ntest_error('you need to pass a ks', Conv1d, ni, nf)\n\n\nconv = Conv1d(ni, nf, ks, padding='same')\ninit_linear(conv, None, init='auto', bias_std=.01)\nconv\n\nConv1d(3, 5, kernel_size=(3,), stride=(1,), padding=(1,))\n\n\n\nconv = Conv1d(ni, nf, ks, padding='causal')\ninit_linear(conv, None, init='auto', bias_std=.01)\nconv\n\nCausalConv1d(3, 5, kernel_size=(3,), stride=(1,))\n\n\n\nconv = Conv1d(ni, nf, ks, padding='valid')\ninit_linear(conv, None, init='auto', bias_std=.01)\nweight_norm(conv)\nconv\n\nConv1d(3, 5, kernel_size=(3,), stride=(1,))\n\n\n\nconv = Conv1d(ni, nf, ks, padding=0)\ninit_linear(conv, None, init='auto', bias_std=.01)\nweight_norm(conv)\nconv\n\nConv1d(3, 5, kernel_size=(3,), stride=(1,))\n\n\n\nsource\n\n\nSeparableConv1d\n\n SeparableConv1d (ni, nf, ks, stride=1, padding='same', dilation=1,\n bias=True, bias_std=0.01)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 64\nc_in = 6\nc_out = 5\nseq_len = 512\nt = torch.rand(bs, c_in, seq_len)\ntest_eq(SeparableConv1d(c_in, c_out, 3)(t).shape, (bs, c_out, seq_len))\n\n\nsource\n\n\nAddCoords1d\n\n AddCoords1d ()\n\nAdd coordinates to ease position identification without modifying mean and std\n\nbs = 2\nc_in = 3\nc_out = 5\nseq_len = 50\n\nt = torch.rand(bs, c_in, seq_len)\nt = (t - t.mean()) / t.std()\ntest_eq(AddCoords1d()(t).shape, (bs, c_in + 1, seq_len))\nnew_t = AddCoords1d()(t)\ntest_close(new_t.mean(),0, 1e-2)\ntest_close(new_t.std(), 1, 1e-2)\n\n\nsource\n\n\nConvBlock\n\n ConvBlock (ni, nf, kernel_size=None, ks=3, stride=1, padding='same',\n bias=None, bias_std=0.01, norm='Batch', zero_norm=False,\n bn_1st=True, act=<class 'torch.nn.modules.activation.ReLU'>,\n act_kwargs={}, init='auto', dropout=0.0, xtra=None,\n coord=False, separable=False, **kwargs)\n\nCreate a sequence of conv1d (ni to nf), activation (if act_cls) and norm_type layers.\n\nsource\n\n\nResBlock1dPlus\n\n ResBlock1dPlus (expansion, ni, nf, coord=False, stride=1, groups=1,\n reduction=None, nh1=None, nh2=None, dw=False, g2=1,\n sa=False, sym=False, norm='Batch', zero_norm=True,\n act_cls=<class 'torch.nn.modules.activation.ReLU'>, ks=3,\n pool=<function AvgPool>, pool_first=True, **kwargs)\n\nResnet block from ni to nh with stride\n\nsource\n\n\nSEModule1d\n\n SEModule1d (ni, reduction=16, act=<class\n 
'torch.nn.modules.activation.ReLU'>, act_kwargs={})\n\nSqueeze and excitation module for 1d\n\nt = torch.rand(8, 32, 12)\ntest_eq(SEModule1d(t.shape[1], 16, act=nn.ReLU, act_kwargs={})(t).shape, t.shape)\n\n\nsource\n\n\nNorm\n\n Norm (nf, ndim=1, norm='Batch', zero_norm=False, init=True, **kwargs)\n\nNorm layer with nf features and ndim with auto init.\n\nbs = 2\nni = 3\nnf = 5\nsl = 4\nks = 5\n\nt = torch.rand(bs, ni, sl)\ntest_eq(ConvBlock(ni, nf, ks)(t).shape, (bs, nf, sl))\ntest_eq(ConvBlock(ni, nf, ks, padding='causal')(t).shape, (bs, nf, sl))\ntest_eq(ConvBlock(ni, nf, ks, coord=True)(t).shape, (bs, nf, sl))\n\n\ntest_eq(BN1d(ni)(t).shape, (bs, ni, sl))\ntest_eq(BN1d(ni).weight.data.mean().item(), 1.)\ntest_eq(BN1d(ni, zero_norm=True).weight.data.mean().item(), 0.)\n\n\ntest_eq(ConvBlock(ni, nf, ks, norm='batch', zero_norm=True)[1].weight.data.unique().item(), 0)\ntest_ne(ConvBlock(ni, nf, ks, norm='batch', zero_norm=False)[1].weight.data.unique().item(), 0)\ntest_eq(ConvBlock(ni, nf, ks, bias=False)[0].bias, None)\nConvBlock(ni, nf, ks, act=Swish, coord=True)\n\nConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(4, 5, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)\n (2): BatchNorm1d(5, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): Swish()\n)\n\n\n\nsource\n\n\nLinLnDrop\n\n LinLnDrop (n_in, n_out, ln=True, p=0.0, act=None, lin_first=False)\n\nModule grouping LayerNorm1d, Dropout and Linear layers\n\nLinLnDrop(2, 3, p=.5)\n\nLinLnDrop(\n (0): LayerNorm((2,), eps=1e-05, elementwise_affine=True)\n (1): Dropout(p=0.5, inplace=False)\n (2): Linear(in_features=2, out_features=3, bias=False)\n)\n\n\n\nsource\n\n\nLambdaPlus\n\n LambdaPlus (func, *args, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nReZero\n\n ReZero (module)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nClip\n\n Clip (min=None, max=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nClamp\n\n Clamp (min=None, max=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nSoftMax\n\n SoftMax (dim=-1)\n\nSoftMax layer\n\nsource\n\n\nLastStep\n\n LastStep ()\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nMax\n\n Max (dim=None, keepdim=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nReshape\n\n Reshape (*shape)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nView\n\n View (*shape)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nTranspose\n\n Transpose (*dims, contiguous=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nPermute\n\n Permute (*dims)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nUnfold\n\n Unfold (dim, size, step=1)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nConcat\n\n Concat (dim=1)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nAdd\n\n Add ()\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nUnsqueeze\n\n Unsqueeze (dim=-1)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nSqueeze\n\n Squeeze (dim=-1)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 2\nnf = 
5\nsl = 4\n\nt = torch.rand(bs, nf, sl)\ntest_eq(Permute(0,2,1)(t).shape, (bs, sl, nf))\ntest_eq(Max(1)(t).shape, (bs, sl))\ntest_eq(Transpose(1,2)(t).shape, (bs, sl, nf))\ntest_eq(Transpose(1,2, contiguous=True)(t).shape, (bs, sl, nf))\ntest_eq(View(-1, 2, 10)(t).shape, (bs, 1, 2, 10))\ntest_eq(Reshape(-1, 2, 10)(t).shape, (bs, 1, 2, 10))\ntest_eq(Reshape()(t).shape, (2, 20))\ntest_eq(Reshape(-1)(t).shape, (40,))\nTranspose(1,2), Permute(0,2,1), View(-1, 2, 10), Transpose(1,2, contiguous=True), Reshape(-1, 2, 10), Noop\n\n(Transpose(1, 2),\n Permute(dims=0, 2, 1),\n View(bs, -1, 2, 10),\n Transpose(dims=1, 2).contiguous(),\n Reshape(bs, -1, 2, 10),\n Sequential())\n\n\n\nsource\n\n\nDropPath\n\n DropPath (p=None)\n\nDrop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\nIt’s similar to Dropout but it drops individual connections instead of nodes. Original code in https://github.com/rwightman/pytorch-image-models (timm library)\n\nt = torch.ones(100,2,3)\ntest_eq(DropPath(0.)(t), t)\nassert DropPath(0.5)(t).max() >= 1\n\n\nsource\n\n\nSharpen\n\n Sharpen (T=0.5)\n\nThis is used to increase confidence in predictions - MixMatch paper\n\nn_samples = 1000\nn_classes = 3\n\nt = (torch.rand(n_samples, n_classes) - .5) * 10\nprobas = F.softmax(t, -1)\nsharpened_probas = Sharpen()(probas)\nplt.plot(probas.flatten().sort().values, color='r')\nplt.plot(sharpened_probas.flatten().sort().values, color='b')\nplt.show()\ntest_gt(sharpened_probas[n_samples//2:].max(-1).values.sum().item(), probas[n_samples//2:].max(-1).values.sum().item())\n\n\n\n\n\n\n\n\n\nsource\n\n\nSequential\n\n Sequential (*args)\n\nClass that allows you to pass one or multiple inputs\n\nsource\n\n\nTimeDistributed\n\n TimeDistributed (module, batch_first=False)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. 
:vartype training: bool\n\nsource\n\n\nget_calibrator\n\n get_calibrator (calibrator=None, n_classes=1, **kwargs)\n\n\nsource\n\n\nMatrix_Scale\n\n Matrix_Scale (n_classes=1, dirichlet=False)\n\nUsed to perform Matrix Scaling (dirichlet=False) or Dirichlet calibration (dirichlet=True)\n\nsource\n\n\nVector_Scale\n\n Vector_Scale (n_classes=1, dirichlet=False)\n\nUsed to perform Vector Scaling (dirichlet=False) or Diagonal Dirichlet calibration (dirichlet=True)\n\nsource\n\n\nTemp_Scale\n\n Temp_Scale (temp=1.0, dirichlet=False)\n\nUsed to perform Temperature Scaling (dirichlet=False) or Single-parameter Dirichlet calibration (dirichlet=True)\n\nbs = 2\nc_out = 3\n\nt = torch.rand(bs, c_out)\nfor calibrator, cal_name in zip(['temp', 'vector', 'matrix'], ['Temp_Scale', 'Vector_Scale', 'Matrix_Scale']): \n cal = get_calibrator(calibrator, n_classes=c_out)\n# print(calibrator)\n# print(cal.weight, cal.bias, '\\n')\n test_eq(cal(t), t)\n test_eq(cal.__class__.__name__, cal_name)\nfor calibrator, cal_name in zip(['dtemp', 'dvector', 'dmatrix'], ['Temp_Scale', 'Vector_Scale', 'Matrix_Scale']):\n cal = get_calibrator(calibrator, n_classes=c_out)\n# print(calibrator)\n# print(cal.weight, cal.bias, '\\n')\n test_eq(cal(t), F.log_softmax(t, dim=1))\n test_eq(cal.__class__.__name__, cal_name)\n\n\nbs = 2\nc_out = 3\n\nt = torch.rand(bs, c_out)\n\ntest_eq(Temp_Scale()(t).shape, t.shape)\ntest_eq(Vector_Scale(c_out)(t).shape, t.shape)\ntest_eq(Matrix_Scale(c_out)(t).shape, t.shape)\ntest_eq(Temp_Scale(dirichlet=True)(t).shape, t.shape)\ntest_eq(Vector_Scale(c_out, dirichlet=True)(t).shape, t.shape)\ntest_eq(Matrix_Scale(c_out, dirichlet=True)(t).shape, t.shape)\n\ntest_eq(Temp_Scale()(t), t)\ntest_eq(Vector_Scale(c_out)(t), t)\ntest_eq(Matrix_Scale(c_out)(t), t)\n\n\nbs = 2\nc_out = 5\n\nt = torch.rand(bs, c_out)\ntest_eq(Vector_Scale(c_out)(t), t)\ntest_eq(Vector_Scale(c_out).weight.data, torch.ones(c_out))\ntest_eq(Vector_Scale(c_out).weight.requires_grad, True)\ntest_eq(type(Vector_Scale(c_out).weight), torch.nn.parameter.Parameter)\n\n\nbs = 2\nc_out = 3\nweight = 2\nbias = 1\n\nt = torch.rand(bs, c_out)\ntest_eq(Matrix_Scale(c_out)(t).shape, t.shape)\ntest_eq(Matrix_Scale(c_out).weight.requires_grad, True)\ntest_eq(type(Matrix_Scale(c_out).weight), torch.nn.parameter.Parameter)\n\n\nsource\n\n\nLogitAdjustmentLayer\n\n LogitAdjustmentLayer (class_priors)\n\nLogit Adjustment for imbalanced datasets\n\nbs, n_classes = 16, 3\nclass_priors = torch.rand(n_classes)\nlogits = torch.randn(bs, n_classes) * 2\ntest_eq(LogitAdjLayer(class_priors)(logits), logits + class_priors)\n\n\nsource\n\n\nMaxPPVPool1d\n\n MaxPPVPool1d ()\n\nDrop-in replacement for AdaptiveConcatPool1d - multiplies nf by 2\n\nsource\n\n\nPPAuc\n\n PPAuc (dim=-1)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nPPV\n\n PPV (dim=-1)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 2\nnf = 5\nsl = 4\n\nt = torch.rand(bs, nf, sl)\ntest_eq(MaxPPVPool1d()(t).shape, (bs, nf*2, 1))\ntest_eq(MaxPPVPool1d()(t).shape, AdaptiveConcatPool1d(1)(t).shape)\n\n\nsource\n\n\nAdaptiveWeightedAvgPool1d\n\n AdaptiveWeightedAvgPool1d (n_in, seq_len, mult=2, n_layers=2, ln=False,\n dropout=0.5, act=ReLU(), zero_init=True)\n\nGlobal Pooling layer that performs a weighted average along the temporal axis\nIt can be considered as a channel-wise form of local temporal attention. Inspired by the paper: Hyun, J., Seong, H., & Kim, E. (2019). 
Universal Pooling–A New Pooling Method for Convolutional Neural Networks. arXiv preprint arXiv:1907.11440.\n\nsource\n\n\nGAWP1d\n\n GAWP1d (n_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=ReLU(),\n zero_init=False)\n\nGlobal AdaptiveWeightedAvgPool1d + Flatten\n\nsource\n\n\nGACP1d\n\n GACP1d (output_size=1)\n\nGlobal AdaptiveConcatPool + Flatten\n\nsource\n\n\nGAP1d\n\n GAP1d (output_size=1)\n\nGlobal Adaptive Pooling + Flatten\n\nsource\n\n\ngwa_pool_head\n\n gwa_pool_head (n_in, c_out, seq_len, bn=True, fc_dropout=0.0)\n\n\nsource\n\n\nGlobalWeightedAveragePool1d\n\n GlobalWeightedAveragePool1d (n_in, seq_len)\n\nGlobal Weighted Average Pooling layer\nInspired by Building Efficient CNN Architecture for Offline Handwritten Chinese Character Recognition https://arxiv.org/pdf/1804.01259.pdf\n\nt = torch.randn(16, 64, 50)\nhead = gwa_pool_head(64, 5, 50)\ntest_eq(head(t).shape, (16, 5))\n\n\nsource\n\n\nattentional_pool_head\n\n attentional_pool_head (n_in, c_out, seq_len=None, bn=True, **kwargs)\n\n\nsource\n\n\nGAttP1d\n\n GAttP1d (n_in, c_out, bn=False)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nAttentionalPool1d\n\n AttentionalPool1d (n_in, c_out, bn=False)\n\nGlobal Adaptive Pooling layer inspired by Attentional Pooling for Action Recognition https://arxiv.org/abs/1711.01467\n\nbs, c_in, seq_len = 16, 1, 50\nc_out = 3\nt = torch.rand(bs, c_in, seq_len)\ntest_eq(GAP1d()(t).shape, (bs, c_in))\ntest_eq(GACP1d()(t).shape, (bs, c_in*2))\nbs, c_in, seq_len = 16, 4, 50\nt = torch.rand(bs, c_in, seq_len)\ntest_eq(GAP1d()(t).shape, (bs, c_in))\ntest_eq(GACP1d()(t).shape, (bs, c_in*2))\ntest_eq(GAWP1d(c_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False)(t).shape, (bs, c_in))\ntest_eq(GAWP1d(c_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False)(t).shape, (bs, c_in))\ntest_eq(GAWP1d(c_in, seq_len, n_layers=1, ln=False, dropout=0.5, zero_init=False)(t).shape, (bs, c_in))\ntest_eq(GAWP1d(c_in, seq_len, n_layers=1, ln=False, dropout=0.5, zero_init=True)(t).shape, (bs, c_in))\ntest_eq(AttentionalPool1d(c_in, c_out)(t).shape, (bs, c_out, 1))\n\n\nbs, c_in, seq_len = 16, 128, 50\nc_out = 14\nt = torch.rand(bs, c_in, seq_len)\nattp = attentional_pool_head(c_in, c_out)\ntest_eq(attp(t).shape, (bs, c_out))\n\n\nsource\n\n\nPoolingLayer\n\n PoolingLayer (method='cls', seq_len=None, token=True, seq_last=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nt = torch.arange(24).reshape(2, 3, 4).float()\ntest_eq(PoolingLayer('cls', token=True, seq_last=True)(t), tensor([[ 0., 4., 8.], [12., 16., 20.]]))\ntest_eq(PoolingLayer('max', token=True, seq_last=True)(t), tensor([[ 3., 7., 11.], [15., 19., 23.]]))\ntest_close(PoolingLayer('mean', token=True, seq_last=True)(t), tensor([[ 2., 6., 10.], [14., 18., 22.]]))\ntest_close(PoolingLayer('max-mean', token=True, seq_last=True)(t), tensor([[ 3., 7., 11., 2., 6., 10.],\n [15., 19., 23., 14., 18., 22.]]))\ntest_close(PoolingLayer('flatten', token=True, seq_last=True)(t), tensor([[ 1., 2., 3., 5., 6., 7., 9., 10., 11.],\n [13., 14., 15., 17., 18., 19., 21., 22., 23.]]))\ntest_eq(PoolingLayer('linear', seq_len=4, token=True, seq_last=True)(t).shape, (2, 3))\ntest_eq(PoolingLayer('max', token=False, seq_last=True)(t), tensor([[ 3., 7., 11.], [15., 19., 23.]]))\ntest_close(PoolingLayer('mean', token=False, seq_last=True)(t), tensor([[ 1.5000, 5.5000, 9.5000],\n [13.5000, 17.5000, 21.5000]]))\ntest_close(PoolingLayer('max-mean', token=False, seq_last=True)(t), tensor([[ 3., 7., 11., 1.5000, 5.5000, 9.5000],\n [15., 19., 23., 13.5000, 17.5000, 21.5000]]))\ntest_close(PoolingLayer('flatten', token=False, seq_last=True)(t), tensor([[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.],\n [12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23.]]))\ntest_eq(PoolingLayer('linear', seq_len=4, token=False, seq_last=True)(t).shape, (2, 3))\n\n\nt = torch.arange(24).reshape(2, 3, 4).swapaxes(1,2).float()\ntest_eq(PoolingLayer('cls', token=True, seq_last=False)(t), tensor([[ 0., 4., 8.], [12., 16., 20.]]))\ntest_eq(PoolingLayer('max', token=True, seq_last=False)(t), tensor([[ 3., 7., 11.], [15., 19., 23.]]))\ntest_close(PoolingLayer('mean', token=True, seq_last=False)(t), tensor([[ 2., 6., 10.], [14., 18., 22.]]))\ntest_close(PoolingLayer('max-mean', token=True, seq_last=False)(t), tensor([[ 3., 7., 11., 2., 6., 10.],\n [15., 19., 23., 14., 18., 
22.]]))\ntest_close(PoolingLayer('flatten', token=True, seq_last=False)(t), tensor([[ 1., 5., 9., 2., 6., 10., 3., 7., 11.],\n [13., 17., 21., 14., 18., 22., 15., 19., 23.]]))\nt = torch.arange(24).reshape(2, 3, 4).swapaxes(1,2).float()\ntest_eq(PoolingLayer('conv1d', seq_len=4, token=False, seq_last=False)(t).shape, (2, 3))\ntest_eq(PoolingLayer('max', token=False, seq_last=False)(t), tensor([[ 3., 7., 11.], [15., 19., 23.]]))\ntest_close(PoolingLayer('mean', token=False, seq_last=False)(t), tensor([[ 1.5000, 5.5000, 9.5000],\n [13.5000, 17.5000, 21.5000]]))\ntest_close(PoolingLayer('max-mean', token=False, seq_last=False)(t), tensor([[ 3., 7., 11., 1.5000, 5.5000, 9.5000],\n [15., 19., 23., 13.5000, 17.5000, 21.5000]]))\ntest_close(PoolingLayer('flatten', token=False, seq_last=False)(t), tensor([[ 0., 4., 8., 1., 5., 9., 2., 6., 10., 3., 7., 11.],\n [12., 16., 20., 13., 17., 21., 14., 18., 22., 15., 19., 23.]]))\ntest_eq(PoolingLayer('conv1d', seq_len=4, token=False, seq_last=False)(t).shape, (2, 3))\n\n\nsource\n\n\nReGLU\n\n ReGLU ()\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nGEGLU\n\n GEGLU ()\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nget_act_fn\n\n get_act_fn (act, **act_kwargs)\n\n\ntest_eq(get_act_fn(nn.ReLU).__repr__(), \"ReLU()\")\ntest_eq(get_act_fn(nn.ReLU()).__repr__(), \"ReLU()\")\ntest_eq(get_act_fn(nn.LeakyReLU, negative_slope=0.05).__repr__(), \"LeakyReLU(negative_slope=0.05)\")\ntest_eq(get_act_fn('reglu').__repr__(), \"ReGLU()\")\ntest_eq(get_act_fn('leakyrelu', negative_slope=0.05).__repr__(), \"LeakyReLU(negative_slope=0.05)\")\n\n\nsource\n\n\nRevIN\n\n RevIN (c_in:int, affine:bool=True, subtract_last:bool=False, dim:int=2,\n eps:float=1e-05)\n\nReversible Instance Normalization layer adapted from\nKim, T., Kim, J., Tae, Y., Park, C., Choi, J. H., & Choo, J. (2021, September). Reversible instance normalization for accurate time-series forecasting against distribution shift. In International Conference on Learning Representations. Original code: https://github.com/ts-kim/RevIN\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\n#features (aka variables or channels)\n\n\naffine\nbool\nTrue\nflag to incidate if RevIN has learnable weight and bias\n\n\nsubtract_last\nbool\nFalse\n\n\n\ndim\nint\n2\nint or tuple of dimensions used to calculate mean and std\n\n\neps\nfloat\n1e-05\nepsilon - parameter added for numerical stability\n\n\n\n\nsource\n\n\nRevIN\n\n RevIN (c_in:int, affine:bool=True, subtract_last:bool=False, dim:int=2,\n eps:float=1e-05)\n\nReversible Instance Normalization layer adapted from\nKim, T., Kim, J., Tae, Y., Park, C., Choi, J. H., & Choo, J. (2021, September). Reversible instance normalization for accurate time-series forecasting against distribution shift. In International Conference on Learning Representations. 
Original code: https://github.com/ts-kim/RevIN\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\n#features (aka variables or channels)\n\n\naffine\nbool\nTrue\nflag to incidate if RevIN has learnable weight and bias\n\n\nsubtract_last\nbool\nFalse\n\n\n\ndim\nint\n2\nint or tuple of dimensions used to calculate mean and std\n\n\neps\nfloat\n1e-05\nepsilon - parameter added for numerical stability\n\n\n\n\nt = ((torch.rand(16, 5, 100) - .25) * torch.Tensor([.01, .1, 1, 10, 100]).reshape(1, -1, 1)).cumsum(-1)\nt_clone = t.clone()\nl = RevIN(5)\nt_norm = l(t, torch.tensor(True))\nt_denorm = l(t_norm, torch.tensor(False))\ntest_close(t_clone, t_denorm, eps=1e-3)\n\n\nmodel = RevIN(5, affine=True)\ntry:\n scripted_model = torch.jit.script(model)\n file_path = f\"test_scripted_model.pt\"\n torch.jit.save(scripted_model, file_path)\n scripted_model = torch.jit.load(file_path)\n\n inp = ((torch.rand(16, 5, 100) - .25) * torch.Tensor([.01, .1, 1, 10, 100]).reshape(1, -1, 1)).cumsum(-1)\n normed_output = model(inp, torch.tensor(True))\n demormed_output = model(normed_output, torch.tensor(False))\n scripted_normed_output = scripted_model(inp, torch.tensor(True))\n scripted_denormed_output = scripted_model(scripted_normed_output, torch.tensor(False))\n test_close(normed_output, scripted_normed_output)\n test_close(demormed_output, scripted_denormed_output)\n os.remove(file_path)\n del scripted_model\n gc.collect()\n print('scripting ok')\nexcept Exception as e:\n print(f'scripting failed: {e}')\n\nscripting ok\n\n\n\nsource\n\n\ncreate_pool_head\n\n create_pool_head (n_in, c_out, seq_len=None, concat_pool=False,\n fc_dropout=0.0, bn=False, y_range=None, **kwargs)\n\n\nbs = 16\nnf = 12\nc_out = 2\nseq_len = 20\nt = torch.rand(bs, nf, seq_len)\ntest_eq(create_pool_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))\ntest_eq(create_pool_head(nf, c_out, seq_len, concat_pool=True, fc_dropout=0.5)(t).shape, (bs, c_out))\ncreate_pool_head(nf, c_out, seq_len, concat_pool=True, bn=True, fc_dropout=.5)\n\nSequential(\n (0): GACP1d(\n (gacp): AdaptiveConcatPool1d(\n (ap): AdaptiveAvgPool1d(output_size=1)\n (mp): AdaptiveMaxPool1d(output_size=1)\n )\n (flatten): Reshape(bs)\n )\n (1): LinBnDrop(\n (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (1): Dropout(p=0.5, inplace=False)\n (2): Linear(in_features=24, out_features=2, bias=False)\n )\n)\n\n\n\nsource\n\n\nmax_pool_head\n\n max_pool_head (n_in, c_out, seq_len, fc_dropout=0.0, bn=False,\n y_range=None, **kwargs)\n\n\nbs = 16\nnf = 12\nc_out = 2\nseq_len = 20\nt = torch.rand(bs, nf, seq_len)\ntest_eq(max_pool_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))\n\n\nsource\n\n\ncreate_pool_plus_head\n\n create_pool_plus_head (*args, lin_ftrs=None, fc_dropout=0.0,\n concat_pool=True, bn_final=False, lin_first=False,\n y_range=None)\n\n\nbs = 16\nnf = 12\nc_out = 2\nseq_len = 20\nt = torch.rand(bs, nf, seq_len)\ntest_eq(create_pool_plus_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))\ntest_eq(create_pool_plus_head(nf, c_out, concat_pool=True, fc_dropout=0.5)(t).shape, (bs, c_out))\ncreate_pool_plus_head(nf, c_out, seq_len, fc_dropout=0.5)\n\nSequential(\n (0): AdaptiveConcatPool1d(\n (ap): AdaptiveAvgPool1d(output_size=1)\n (mp): AdaptiveMaxPool1d(output_size=1)\n )\n (1): Reshape(bs)\n (2): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): Dropout(p=0.25, inplace=False)\n (4): Linear(in_features=24, out_features=512, bias=False)\n 
(5): ReLU(inplace=True)\n (6): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (7): Dropout(p=0.5, inplace=False)\n (8): Linear(in_features=512, out_features=2, bias=False)\n)\n\n\n\nsource\n\n\ncreate_conv_head\n\n create_conv_head (*args, adaptive_size=None, y_range=None)\n\n\nbs = 16\nnf = 12\nc_out = 2\nseq_len = 20\nt = torch.rand(bs, nf, seq_len)\ntest_eq(create_conv_head(nf, c_out, seq_len)(t).shape, (bs, c_out))\ntest_eq(create_conv_head(nf, c_out, adaptive_size=50)(t).shape, (bs, c_out))\ncreate_conv_head(nf, c_out, 50)\n\nSequential(\n (0): ConvBlock(\n (0): Conv1d(12, 6, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (1): ConvBlock(\n (0): Conv1d(6, 3, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (2): ConvBlock(\n (0): Conv1d(3, 2, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (3): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n)\n\n\n\nsource\n\n\ncreate_mlp_head\n\n create_mlp_head (nf, c_out, seq_len=None, flatten=True, fc_dropout=0.0,\n bn=False, lin_first=False, y_range=None)\n\n\nbs = 16\nnf = 12\nc_out = 2\nseq_len = 20\nt = torch.rand(bs, nf, seq_len)\ntest_eq(create_mlp_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))\nt = torch.rand(bs, nf, seq_len)\ncreate_mlp_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)\n\nSequential(\n (0): Reshape(bs)\n (1): LinBnDrop(\n (0): BatchNorm1d(240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (1): Dropout(p=0.5, inplace=False)\n (2): Linear(in_features=240, out_features=2, bias=False)\n )\n)\n\n\n\nsource\n\n\ncreate_fc_head\n\n create_fc_head (nf, c_out, seq_len=None, flatten=True, lin_ftrs=None,\n y_range=None, fc_dropout=0.0, bn=False, bn_final=False,\n act=ReLU(inplace=True))\n\n\nbs = 16\nnf = 12\nc_out = 2\nseq_len = 20\nt = torch.rand(bs, nf, seq_len)\ntest_eq(create_fc_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))\ncreate_mlp_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)\n\nSequential(\n (0): Reshape(bs)\n (1): LinBnDrop(\n (0): BatchNorm1d(240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (1): Dropout(p=0.5, inplace=False)\n (2): Linear(in_features=240, out_features=2, bias=False)\n )\n)\n\n\n\nsource\n\n\ncreate_rnn_head\n\n create_rnn_head (*args, fc_dropout=0.0, bn=False, y_range=None)\n\n\nbs = 16\nnf = 12\nc_out = 2\nseq_len = 20\nt = torch.rand(bs, nf, seq_len)\ntest_eq(create_rnn_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))\ncreate_rnn_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)\n\nSequential(\n (0): LastStep()\n (1): LinBnDrop(\n (0): BatchNorm1d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (1): Dropout(p=0.5, inplace=False)\n (2): Linear(in_features=12, out_features=2, bias=False)\n )\n)\n\n\n\nsource\n\n\nimputation_head\n\n imputation_head (c_in, c_out, seq_len=None, ks=1, y_range=None,\n fc_dropout=0.0)\n\n\nbs = 16\nnf = 12\nni = 2\nseq_len = 20\nt = torch.rand(bs, nf, seq_len)\nhead = imputation_head(nf, ni, seq_len=None, ks=1, y_range=None, fc_dropout=0.)\ntest_eq(head(t).shape, (bs, ni, seq_len))\nhead = imputation_head(nf, ni, seq_len=None, ks=1, y_range=(.3,.7), fc_dropout=0.)\ntest_ge(head(t).min(), 
.3)\ntest_le(head(t).max(), .7)\ny_range = (tensor([0.1000, 0.1000, 0.1000, 0.1000, 0.2000, 0.2000, 0.2000, 0.2000, 0.3000,\n 0.3000, 0.3000, 0.3000]),\n tensor([0.6000, 0.6000, 0.6000, 0.6000, 0.7000, 0.7000, 0.7000, 0.7000, 0.8000,\n 0.8000, 0.8000, 0.8000]))\ntest_ge(head(t).min(), .1)\ntest_le(head(t).max(), .9)\nhead = imputation_head(nf, ni, seq_len=None, ks=1, y_range=y_range, fc_dropout=0.)\nhead\n\nSequential(\n (0): Dropout(p=0.0, inplace=False)\n (1): Conv1d(12, 2, kernel_size=(1,), stride=(1,))\n (2): fastai.layers.SigmoidRange(low=tensor([0.1000, 0.1000, 0.1000, 0.1000, 0.2000, 0.2000, 0.2000, 0.2000, 0.3000,\n 0.3000, 0.3000, 0.3000]), high=tensor([0.6000, 0.6000, 0.6000, 0.6000, 0.7000, 0.7000, 0.7000, 0.7000, 0.8000,\n 0.8000, 0.8000, 0.8000]))\n)\n\n\n\nsource\n\n\ncreate_conv_lin_nd_head\n\n create_conv_lin_nd_head (n_in, n_out, seq_len, d, conv_first=True,\n conv_bn=False, lin_bn=False, fc_dropout=0.0,\n **kwargs)\n\nModule to create a nd output head\n\nbs = 16\nnf = 32\nc = 5\nseq_len = 10\nd = 2\ntarg = torch.randint(0, c, (bs,d))\nt = torch.randn(bs, nf, seq_len)\nhead = conv_lin_nd_head(nf, c, seq_len, d, conv_first=True, fc_dropout=.5)\ninp = head(t)\ntest_eq(inp.shape, (bs, d, c))\nloss = CrossEntropyLossFlat()(inp, targ)\nloss, head\n\n(TensorBase(1.7074, grad_fn=<AliasBackward0>),\n create_conv_lin_nd_head(\n (0): Conv1d(32, 5, kernel_size=(1,), stride=(1,))\n (1): Dropout(p=0.5, inplace=False)\n (2): Linear(in_features=10, out_features=2, bias=True)\n (3): Transpose(-1, -2)\n (4): Reshape(bs, 2, 5)\n ))\n\n\n\nbs = 16\nnf = 32\nc = 5\nseq_len = 10\nd = [2, 8]\ntarg = torch.randint(0, c, [bs]+d)\nt = torch.randn(bs, nf, seq_len)\nhead = conv_lin_nd_head(nf, c, seq_len, d, conv_first=False, fc_dropout=.5)\ninp = head(t)\ntest_eq(inp.shape, [bs]+d+[c])\nloss = CrossEntropyLossFlat()(inp, targ)\nloss, head\n\n(TensorBase(1.6561, grad_fn=<AliasBackward0>),\n create_conv_lin_nd_head(\n (0): Dropout(p=0.5, inplace=False)\n (1): Linear(in_features=10, out_features=16, bias=True)\n (2): Conv1d(32, 5, kernel_size=(1,), stride=(1,))\n (3): Transpose(-1, -2)\n (4): Reshape(bs, 2, 8, 5)\n ))\n\n\n\nbs = 16\nnf = 32\nc = 1\nseq_len = 10\nd = 2\ntarg = torch.rand(bs, d)\nt = torch.randn(bs, nf, seq_len)\nhead = conv_lin_nd_head(nf, c, seq_len, d, conv_first=False, fc_dropout=.5)\ninp = head(t)\ntest_eq(inp.shape, (bs, d))\nloss = L1LossFlat()(inp, targ)\nloss, head\n\n(TensorBase(0.6017, grad_fn=<AliasBackward0>),\n create_conv_lin_nd_head(\n (0): Dropout(p=0.5, inplace=False)\n (1): Linear(in_features=10, out_features=2, bias=True)\n (2): Conv1d(32, 1, kernel_size=(1,), stride=(1,))\n (3): Transpose(-1, -2)\n (4): Reshape(bs, 2)\n ))\n\n\n\nbs = 16\nnf = 32\nc = 1\nseq_len = 10\nd = [2,3]\ntarg = torch.rand(bs, *d)\nt = torch.randn(bs, nf, seq_len)\nhead = conv_lin_nd_head(nf, c, seq_len, d, conv_first=False, fc_dropout=.5)\ninp = head(t)\ntest_eq(inp.shape, [bs]+d)\nloss = L1LossFlat()(inp, targ)\nloss, head\n\n(TensorBase(0.5439, grad_fn=<AliasBackward0>),\n create_conv_lin_nd_head(\n (0): Dropout(p=0.5, inplace=False)\n (1): Linear(in_features=10, out_features=6, bias=True)\n (2): Conv1d(32, 1, kernel_size=(1,), stride=(1,))\n (3): Transpose(-1, -2)\n (4): Reshape(bs, 2, 3)\n ))\n\n\n\nsource\n\n\nlin_nd_head\n\n lin_nd_head (n_in, n_out, seq_len=None, d=None, flatten=False,\n use_bn=False, fc_dropout=0.0)\n\nModule to create a nd output head with linear layers\n\nbs = 16\nnf = 32\nseq_len = 50\nx = torch.normal(0, 1, (bs, nf, seq_len))\n\nfor use_bn in [False, True]:\n 
for fc_dropout in [0, 0.2]:\n for flatten in [False, True]:\n for c in [1, 3]:\n for d in [None, (50,), (50,10), (30,5), (50,2,3), (30,2,3)]:\n for q_len in [1, seq_len]:\n head = lin_nd_head(nf, c, q_len, d, flatten=flatten, use_bn=use_bn, fc_dropout=fc_dropout)\n test_eq(head(x).shape, (bs, ) + (d if d is not None else ()) + ((c,) if c != 1 else ()))\n\n\nbs = 16\nnf = 32\nc = 5\nseq_len = 10\nd = 2\ntarg = torch.randint(0, c, (bs,d))\nt = torch.randn(bs, nf, seq_len)\nhead = lin_nd_head(nf, c, seq_len, d, fc_dropout=.5)\ninp = head(t)\ntest_eq(inp.shape, (bs, d, c))\nloss = CrossEntropyLossFlat()(inp, targ)\nloss, head\n\n(TensorBase(1.8360, grad_fn=<AliasBackward0>),\n lin_nd_head(\n (0): Dropout(p=0.5, inplace=False)\n (1): Reshape(bs)\n (2): Linear(in_features=320, out_features=10, bias=True)\n (3): Reshape(bs, 2, 5)\n ))\n\n\n\nbs = 16\nnf = 32\nc = 5\nseq_len = 10\nd = [2, 8]\ntarg = torch.randint(0, c, [bs]+d)\nt = torch.randn(bs, nf, seq_len)\nhead = lin_nd_head(nf, c, seq_len, d, fc_dropout=.5)\ninp = head(t)\ntest_eq(inp.shape, [bs]+d+[c])\nloss = CrossEntropyLossFlat()(inp, targ)\nloss, head\n\n(TensorBase(1.7557, grad_fn=<AliasBackward0>),\n lin_nd_head(\n (0): Dropout(p=0.5, inplace=False)\n (1): Reshape(bs)\n (2): Linear(in_features=320, out_features=80, bias=True)\n (3): Reshape(bs, 2, 8, 5)\n ))\n\n\n\nbs = 16\nnf = 32\nc = 1\nseq_len = 10\nd = 2\ntarg = torch.rand(bs, d)\nt = torch.randn(bs, nf, seq_len)\nhead = lin_nd_head(nf, c, seq_len, d, fc_dropout=.5)\ninp = head(t)\ntest_eq(inp.shape, (bs, d))\nloss = L1LossFlat()(inp, targ)\nloss, head\n\n(TensorBase(0.5978, grad_fn=<AliasBackward0>),\n lin_nd_head(\n (0): Dropout(p=0.5, inplace=False)\n (1): Reshape(bs)\n (2): Linear(in_features=320, out_features=2, bias=True)\n (3): Reshape(bs, 2)\n ))\n\n\n\nbs = 16\nnf = 32\nc = 1\nseq_len = 10\nd = [2,3]\ntarg = torch.rand(bs, *d)\nt = torch.randn(bs, nf, seq_len)\nhead = lin_nd_head(nf, c, seq_len, d, fc_dropout=.5)\ninp = head(t)\ntest_eq(inp.shape, [bs]+d)\nloss = L1LossFlat()(inp, targ)\nloss, head\n\n(TensorBase(0.8286, grad_fn=<AliasBackward0>),\n lin_nd_head(\n (0): Dropout(p=0.5, inplace=False)\n (1): Reshape(bs)\n (2): Linear(in_features=320, out_features=6, bias=True)\n (3): Reshape(bs, 2, 3)\n ))\n\n\n\nsource\n\n\nrocket_nd_head\n\n rocket_nd_head (n_in, n_out, seq_len=None, d=None, use_bn=False,\n fc_dropout=0.0, zero_init=True)\n\nModule to create a nd output head with linear layers for the rocket family of models\n\nbs = 16\nnf = 99\nseq_len = 1\nx = torch.normal(0, 1, (bs, nf, seq_len))\n\nfor use_bn in [False, True]:\n for fc_dropout in [0, 0.2]:\n for c in [1, 3]:\n for d in [None, (50,), (50,10), (30,5), (50,2,3), (30,2,3)]:\n head = rocket_nd_head(nf, c, 1, d, use_bn=use_bn, fc_dropout=fc_dropout)\n test_eq(head(x).shape, (bs, ) + (d if d is not None else ()) + ((c,) if c != 1 else ()))\n\n\nsource\n\n\nxresnet1d_nd_head\n\n xresnet1d_nd_head (n_in, n_out, seq_len=None, d=None, use_bn=False,\n fc_dropout=0.0, zero_init=True)\n\nModule to create a nd output head with linear layers for the xresnet family of models\n\nbs = 16\nnf = 99\nseq_len = 2\nx = torch.normal(0, 1, (bs, nf, seq_len))\n\nfor use_bn in [False, True]:\n for fc_dropout in [0, 0.2]:\n for c in [1, 3]:\n for d in [None, (50,), (50,10), (30,5), (50,2,3), (30,2,3)]:\n head = xresnet1d_nd_head(nf, c, 1, d, use_bn=use_bn, fc_dropout=fc_dropout)\n test_eq(head(x).shape, (bs, ) + (d if d is not None else ()) + ((c,) if c != 1 else ()))\n\n\nsource\n\n\ncreate_conv_3d_head\n\n create_conv_3d_head 
(n_in, n_out, seq_len, d, use_bn=False, **kwargs)\n\nModule to create a nd output head with a convolutional layer\n\nbs = 16\nnf = 32\nc = 5\nseq_len = 10\nd = 10\ntarg = torch.randint(0, c, (bs,d))\nt = torch.randn(bs, nf, seq_len)\nhead = conv_3d_head(nf, c, seq_len, d)\ninp = head(t)\ntest_eq(inp.shape, (bs, d, c))\nloss = CrossEntropyLossFlat()(inp, targ)\nloss, head\n\n(TensorBase(1.7321, grad_fn=<AliasBackward0>),\n create_conv_3d_head(\n (0): ConvBlock(\n (0): Conv1d(32, 5, kernel_size=(1,), stride=(1,))\n )\n (1): Transpose(-1, -2)\n ))\n\n\n\nbs = 16\nnf = 32\nc = 1\nseq_len = 10\nd = 10\ntarg = torch.rand(bs, d)\nt = torch.randn(bs, nf, seq_len)\nhead = conv_3d_head(nf, c, seq_len, d)\ninp = head(t)\ntest_eq(inp.shape, (bs, d))\nloss = L1LossFlat()(inp, targ)\nloss, head\n\n(TensorBase(0.5833, grad_fn=<AliasBackward0>),\n create_conv_3d_head(\n (0): ConvBlock(\n (0): Conv1d(32, 1, kernel_size=(1,), stride=(1,))\n )\n (1): Transpose(-1, -2)\n (2): Squeeze(dim=-1)\n ))\n\n\n\nsource\n\n\nuniversal_pool_head\n\n universal_pool_head (n_in, c_out, seq_len, mult=2, pool_n_layers=2,\n pool_ln=True, pool_dropout=0.5, pool_act=ReLU(),\n zero_init=True, bn=True, fc_dropout=0.0)\n\n\nbs, c_in, seq_len = 16, 128, 50\nc_out = 14\nt = torch.rand(bs, c_in, seq_len)\nuph = universal_pool_head(c_in, c_out, seq_len)\ntest_eq(uph(t).shape, (bs, c_out))\nuph = universal_pool_head(c_in, c_out, seq_len, 2)\ntest_eq(uph(t).shape, (bs, c_out))\n\n\nbs, c_in, seq_len = 16, 128, 50\nc_out = 14\nd = 5\nt = torch.rand(bs, c_in, seq_len)\nfor head in heads: \n print(head.__name__)\n if head.__name__ == \"create_conv_3d_head\":\n h = head(c_in, c_out, seq_len, seq_len)\n test_eq(h(t).shape, (bs, seq_len, c_out))\n elif 'nd' in head.__name__: \n h = head(c_in, c_out, seq_len, d)\n test_eq(h(t).shape, (bs, d, c_out))\n else: \n h = head(c_in, c_out, seq_len)\n test_eq(h(t).shape, (bs, c_out))\n\ncreate_mlp_head\ncreate_fc_head\naverage_pool_head\nmax_pool_head\nconcat_pool_head\ncreate_pool_plus_head\ncreate_conv_head\ncreate_rnn_head\ncreate_conv_lin_nd_head\nlin_nd_head\ncreate_conv_3d_head\nattentional_pool_head\nuniversal_pool_head\ngwa_pool_head\n\n\n\nsource\n\n\nSqueezeExciteBlock\n\n SqueezeExciteBlock (ni, reduction=16)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 2\nni = 32\nsl = 4\nt = torch.rand(bs, ni, sl)\ntest_eq(SqueezeExciteBlock(ni)(t).shape, (bs, ni, sl))\n\n\nsource\n\n\nGaussianNoise\n\n GaussianNoise (sigma=0.1, is_relative_detach=True)\n\nGaussian noise regularizer.\nArgs: sigma (float, optional): relative standard deviation used to generate the noise. Relative means that it will be multiplied by the magnitude of the value your are adding the noise to. This means that sigma can be the same regardless of the scale of the vector. is_relative_detach (bool, optional): whether to detach the variable before computing the scale of the noise. 
If False then the scale of the noise won’t be seen as a constant but something to optimize: this will bias the network to generate vectors with smaller values.\n\nt = torch.ones(2,3,4)\ntest_ne(GaussianNoise()(t), t)\ntest_eq(GaussianNoise()(t).shape, t.shape)\nt = torch.ones(2,3)\ntest_ne(GaussianNoise()(t), t)\ntest_eq(GaussianNoise()(t).shape, t.shape)\nt = torch.ones(2)\ntest_ne(GaussianNoise()(t), t)\ntest_eq(GaussianNoise()(t).shape, t.shape)\n\n\nsource\n\n\nTokenLayer\n\n TokenLayer (token=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nPositionwiseFeedForward\n\n PositionwiseFeedForward (dim, dropout=0.0, act='reglu', mlp_ratio=1)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nt = torch.randn(2,3,10)\nm = PositionwiseFeedForward(10, dropout=0., act='reglu', mlp_ratio=1)\ntest_eq(m(t).shape, t.shape)\nm = PositionwiseFeedForward(10, dropout=0., act='smelu', mlp_ratio=1)\ntest_eq(m(t).shape, t.shape)\n\n\nsource\n\n\nScaledDotProductAttention\n\n ScaledDotProductAttention (d_model, n_heads, attn_dropout=0.0,\n res_attention=False, lsa=False)\n\nScaled Dot-Product Attention module (Attention is all you need by Vaswani et al., 2017) with optional residual attention from previous layer (Realformer: Transformer likes residual attention by He et al, 2020) and locality self sttention (Vision Transformer for Small-Size Datasets by Lee et al, 2021)\n\nB = 16\nC = 10\nM = 1500 # seq_len\n\nn_heads = 1\nD = 128 # model dimension\nN = 512 # max_seq_len - latent's index dimension\nd_k = D // n_heads\n\nxb = torch.randn(B, C, M)\nxb = (xb - xb.mean()) / xb.std()\n\n# Attention\n# input (Q)\nlin = nn.Linear(M, N, bias=False)\nQ = lin(xb).transpose(1,2)\ntest_eq(Q.shape, (B, N, C))\n\n# q\nto_q = nn.Linear(C, D, bias=False)\nq = to_q(Q)\nq = nn.LayerNorm(D)(q)\n\n# k, v\ncontext = xb.transpose(1,2)\nto_kv = nn.Linear(C, D * 2, bias=False)\nk, v = to_kv(context).chunk(2, dim = -1)\nk = k.transpose(-1, -2)\nk = nn.LayerNorm(M)(k)\nv = nn.LayerNorm(D)(v)\n\ntest_eq(q.shape, (B, N, D))\ntest_eq(k.shape, (B, D, M))\ntest_eq(v.shape, (B, M, D))\n\noutput, attn, scores = ScaledDotProductAttention(D, n_heads, res_attention=True)(q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1))\ntest_eq(output.shape, (B, 1, N, D))\ntest_eq(attn.shape, (B, 1, N, M))\ntest_eq(scores.shape, (B, 1, N, M))\nscores.mean(), scores.std()\n\n(tensor(1.3535e-10, grad_fn=<MeanBackward0>),\n tensor(1.0555, grad_fn=<StdBackward0>))\n\n\n\nsource\n\n\nMultiheadAttention\n\n MultiheadAttention (d_model, n_heads, d_k=None, d_v=None,\n res_attention=False, attn_dropout=0.0,\n proj_dropout=0.0, qkv_bias=True, lsa=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nq = torch.rand([16, 3, 50, 8]) \nk = torch.rand([16, 3, 50, 8]).transpose(-1, -2)\nv = torch.rand([16, 3, 50, 6])\nattn_mask = torch.triu(torch.ones(50, 50)) # shape: q_len x q_len\nkey_padding_mask = torch.zeros(16, 50)\nkey_padding_mask[[1, 3, 6, 15], -10:] = 1\nkey_padding_mask = key_padding_mask.bool()\nprint('attn_mask', attn_mask.shape, 'key_padding_mask', key_padding_mask.shape)\noutput, attn = ScaledDotProductAttention(24, 3, attn_dropout=.1)(q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask)\noutput.shape, attn.shape\n\nattn_mask torch.Size([50, 50]) key_padding_mask torch.Size([16, 50])\n\n\n(torch.Size([16, 3, 50, 6]), torch.Size([16, 3, 50, 50]))\n\n\n\nt = torch.rand(16, 50, 128)\noutput, attn = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\noutput.shape, attn.shape\n\n(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))\n\n\nTest multi-head attention with self-locality attention\n\n# lsa (locality self-sttention)\nt = torch.rand(16, 50, 128)\nattn_mask = torch.eye(50).reshape(1, 1, 50, 50).bool()\noutput, attn = MultiheadAttention(d_model=128, n_heads=8, lsa=True)(t, t, t, key_padding_mask=key_padding_mask, attn_mask=attn_mask)\noutput.shape, attn.shape\n\n(torch.Size([16, 50, 128]), torch.Size([16, 8, 50, 50]))\n\n\n\nt 
= torch.rand(16, 50, 128)\natt_mask = (torch.rand((50, 50)) > .85).float()\natt_mask[att_mask == 1] = -np.inf\n\nmha = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)\noutput, attn = mha(t, t, t, attn_mask=att_mask)\ntest_eq(torch.isnan(output).sum().item(), 0)\ntest_eq(torch.isnan(attn).sum().item(), 0)\nloss = output[:2, :].sum()\ntest_eq(torch.isnan(loss).sum().item(), 0)\nloss.backward()\nfor n, p in mha.named_parameters(): \n if p.grad is not None:\n test_eq(torch.isnan(p.grad).sum().item(), 0)\n\n\nt = torch.rand(16, 50, 128)\nattn_mask = (torch.rand((50, 50)) > .85)\n\n# True values will be masked\nmha = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)\noutput, attn = mha(t, t, t, attn_mask=att_mask)\ntest_eq(torch.isnan(output).sum().item(), 0)\ntest_eq(torch.isnan(attn).sum().item(), 0)\nloss = output[:2, :].sum()\ntest_eq(torch.isnan(loss).sum().item(), 0)\nloss.backward()\nfor n, p in mha.named_parameters(): \n if p.grad is not None:\n test_eq(torch.isnan(p.grad).sum().item(), 0)\n\n\nsource\n\n\nMultiConv1d\n\n MultiConv1d (ni, nf=None, kss=[1, 3, 5, 7], keep_original=False,\n separable=False, dim=1, **kwargs)\n\nModule that applies multiple convolutions with different kernel sizes\n\nt = torch.rand(16, 6, 37)\ntest_eq(MultiConv1d(6, None, kss=[1,3,5], keep_original=True)(t).shape, [16, 24, 37])\ntest_eq(MultiConv1d(6, 36, kss=[1,3,5], keep_original=False)(t).shape, [16, 36, 37])\ntest_eq(MultiConv1d(6, None, kss=[1,3,5], keep_original=True, dim=-1)(t).shape, [16, 6, 37*4])\ntest_eq(MultiConv1d(6, 60, kss=[1,3,5], keep_original=True)(t).shape, [16, 60, 37])\ntest_eq(MultiConv1d(6, 60, kss=[1,3,5], separable=True)(t).shape, [16, 60, 37])\n\n\nsource\n\n\nLSTMOutput\n\n LSTMOutput ()\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nt = ([1], [2], [3])\ntest_eq(LSTMOutput()(t), [1])\n\n\nsource\n\n\nemb_sz_rule\n\n emb_sz_rule (n_cat)\n\nRule of thumb to pick embedding size corresponding to n_cat (original from fastai)\n\ntest_eq(emb_sz_rule(7), 5)\n\n\nsource\n\n\nTSEmbedding\n\n TSEmbedding (ni, nf, std=0.01, padding_idx=None)\n\nEmbedding layer with truncated normal initialization adapted from fastai\n\nsource\n\n\nMultiEmbedding\n\n MultiEmbedding (c_in, n_cat_embeds, cat_embed_dims=None, cat_pos=None,\n std=0.01, cat_padding_idxs=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\na = alphabet[np.random.randint(0,3,40)]\nb = ALPHABET[np.random.randint(6,10,40)]\nc = np.random.rand(40).reshape(4,1,10)\nmap_a = {k:v for v,k in enumerate(np.unique(a))}\nmap_b = {k:v for v,k in enumerate(np.unique(b))}\nn_embeds = [len(m.keys()) for m in [map_a, map_b]]\nszs = [emb_sz_rule(n) for n in n_embeds]\na = np.asarray(a.map(map_a)).reshape(4,1,10)\nb = np.asarray(b.map(map_b)).reshape(4,1,10)\ninp = torch.from_numpy(np.concatenate((c,a,b), 1)).float()\nmemb = MultiEmbedding(3, n_embeds, cat_pos=[1,2])\n# registered buffers are part of the state_dict() but not module.parameters()\nassert all([(k in memb.state_dict().keys()) for k in ['cat_pos', 'cont_pos']])\nembeddings = memb(inp)\nprint(n_embeds, szs, inp.shape, embeddings.shape)\ntest_eq(embeddings.shape, (inp.shape[0],sum(szs)+1,inp.shape[-1]))\n\n[3, 4] [3, 3] torch.Size([4, 3, 10]) torch.Size([4, 7, 10])\n\n\n\nme = MultiEmbedding(3, 4, cat_pos=2)\ntest_eq(me.cat_embed[0].weight.shape, (4,3))\ntest_eq(me.cat_pos.cpu().item(), 2)", + "crumbs": [ + "Models", + "Layers" + ] + }, + { + "objectID": "models.tabfusiontransformer.html", + "href": 
"models.tabfusiontransformer.html", + "title": "TabFusionTransformer", + "section": "", + "text": "This is a a Pytorch implementeation of TabTransformerTransformer created by Ignacio Oguiza (oguiza@timeseriesAI.co)\nThis implementation is inspired by:\nHuang, X., Khetan, A., Cvitkovic, M., & Karnin, Z. (2020). TabTransformer: Tabular Data Modeling Using Contextual Embeddings. arXiv preprint https://arxiv.org/pdf/2012.06678\nOfficial repo: https://github.com/awslabs/autogluon/tree/master/tabular/src/autogluon/tabular/models/tab_transformer\n\nsource\n\nTabFusionTransformer\n\n TabFusionTransformer (classes, cont_names, c_out, d_model=32, n_layers=6,\n n_heads=8, d_k=None, d_v=None, d_ff=None,\n res_attention=True, attention_act='gelu',\n res_dropout=0.0, fc_mults=(4, 2), fc_dropout=0.0,\n fc_act=None, fc_skip=False, fc_bn=False,\n bn_final=False, init=True)\n\nClass that allows you to pass one or multiple inputs\n\nsource\n\n\nTabFusionBackbone\n\n TabFusionBackbone (classes, cont_names, d_model=32, n_layers=6,\n n_heads=8, d_k=None, d_v=None, d_ff=None, init=True,\n res_attention=True, attention_act='gelu',\n res_dropout=0.0)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nSequential\n\n Sequential (*args)\n\nClass that allows you to pass one or multiple inputs\n\nsource\n\n\nifnone\n\n ifnone (a, b)\n\nb if a is None else a\n\nfrom fastai.tabular.all import *\n\n\npath = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(path/'adult.csv')\ndls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names=\"salary\",\n cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'],\n cont_names = ['age', 'fnlwgt', 'education-num'],\n procs = [Categorify, FillMissing, Normalize])\nx_cat, x_cont, yb = first(dls.train)\nmodel = TabFusionTransformer(dls.classes, dls.cont_names, dls.c)\ntest_eq(model(x_cat, x_cont).shape, (dls.train.bs, dls.c))\n\n\nsource\n\n\nTSTabFusionTransformer\n\n TSTabFusionTransformer (c_in, c_out, seq_len, classes, cont_names,\n d_model=32, n_layers=6, n_heads=8, d_k=None,\n d_v=None, d_ff=None, res_attention=True,\n attention_act='gelu', res_dropout=0.0,\n fc_mults=(1, 0.5), fc_dropout=0.0, fc_act=None,\n fc_skip=False, fc_bn=False, bn_final=False,\n init=True)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. 
You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nclasses = {'education': ['#na#', '10th', '11th', '12th', '1st-4th', '5th-6th', '7th-8th', '9th', 'Assoc-acdm', 'Assoc-voc', 'Bachelors', 'Doctorate', \n 'HS-grad', 'Masters', 'Preschool', 'Prof-school', 'Some-college'],\n 'education-num_na': ['#na#', False, True],\n 'marital-status': ['#na#', 'Divorced', 'Married-AF-spouse', 'Married-civ-spouse', 'Married-spouse-absent', 'Never-married', 'Separated', 'Widowed'],\n 'occupation': ['#na#', '?', 'Adm-clerical', 'Armed-Forces', 'Craft-repair', 'Exec-managerial', 'Farming-fishing', 'Handlers-cleaners', 'Machine-op-inspct', \n 'Other-service', 'Priv-house-serv', 'Prof-specialty', 'Protective-serv', 'Sales', 'Tech-support', 'Transport-moving'],\n 'race': ['#na#', 'Amer-Indian-Eskimo', 'Asian-Pac-Islander', 'Black', 'Other', 'White'],\n 'relationship': ['#na#', 'Husband', 'Not-in-family', 'Other-relative', 'Own-child', 'Unmarried', 'Wife'],\n 'workclass': ['#na#', '?', 'Federal-gov', 'Local-gov', 'Never-worked', 'Private', 'Self-emp-inc', 'Self-emp-not-inc', 'State-gov', 'Without-pay']}\n\ncont_names = ['a', 'b', 'c']\nc_out = 6\nx_ts = torch.randn(64, 3, 10)\nx_cat = torch.randint(0,3,(64,7))\nx_cont = torch.randn(64,3)\nmodel = TSTabFusionTransformer(x_ts.shape[1], c_out, x_ts.shape[-1], classes, cont_names)\nx = (x_ts, (x_cat, x_cont))\ntest_eq(model(x).shape, (x_ts.shape[0], c_out))", + "crumbs": [ + "Models", + "Tabular models", + "TabFusionTransformer" + ] + }, + { + "objectID": "models.omniscalecnn.html", + "href": "models.omniscalecnn.html", + "title": "OmniScaleCNN", + "section": "", + "text": "This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co\n\n\nsource\n\ngenerate_layer_parameter_list\n\n generate_layer_parameter_list (start, end, layers, in_channel=1)\n\n\nsource\n\n\nget_out_channel_number\n\n get_out_channel_number (paramenter_layer, in_channel, prime_list)\n\n\nsource\n\n\nget_Prime_number_in_a_range\n\n get_Prime_number_in_a_range (start, end)\n\n\nsource\n\n\nOmniScaleCNN\n\n OmniScaleCNN (c_in, c_out, seq_len, layers=[1024, 229376],\n few_shot=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nbuild_layer_with_layer_parameter\n\n build_layer_with_layer_parameter (layer_parameters)\n\nformerly build_layer_with_layer_parameter\n\nsource\n\n\nSampaddingConv1D_BN\n\n SampaddingConv1D_BN (in_channels, out_channels, kernel_size)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 16\nc_in = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, c_in, seq_len)\nm = create_model(OmniScaleCNN, c_in, c_out, seq_len)\ntest_eq(OmniScaleCNN(c_in, c_out, seq_len)(xb).shape, [bs, c_out])\nm\n\nOmniScaleCNN(\n (net): Sequential(\n (0): build_layer_with_layer_parameter(\n (conv_list): ModuleList(\n (0): SampaddingConv1D_BN(\n (padding): 
ConstantPad1d(padding=(0, 0), value=0)\n (conv1d): Conv1d(3, 56, kernel_size=(1,), stride=(1,))\n (bn): BatchNorm1d(56, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): SampaddingConv1D_BN(\n (padding): ConstantPad1d(padding=(0, 1), value=0)\n (conv1d): Conv1d(3, 56, kernel_size=(2,), stride=(1,))\n (bn): BatchNorm1d(56, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (2): SampaddingConv1D_BN(\n (padding): ConstantPad1d(padding=(1, 1), value=0)\n (conv1d): Conv1d(3, 56, kernel_size=(3,), stride=(1,))\n (bn): BatchNorm1d(56, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n )\n (1): build_layer_with_layer_parameter(\n (conv_list): ModuleList(\n (0): SampaddingConv1D_BN(\n (padding): ConstantPad1d(padding=(0, 0), value=0)\n (conv1d): Conv1d(168, 227, kernel_size=(1,), stride=(1,))\n (bn): BatchNorm1d(227, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): SampaddingConv1D_BN(\n (padding): ConstantPad1d(padding=(0, 1), value=0)\n (conv1d): Conv1d(168, 227, kernel_size=(2,), stride=(1,))\n (bn): BatchNorm1d(227, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (2): SampaddingConv1D_BN(\n (padding): ConstantPad1d(padding=(1, 1), value=0)\n (conv1d): Conv1d(168, 227, kernel_size=(3,), stride=(1,))\n (bn): BatchNorm1d(227, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n )\n (2): build_layer_with_layer_parameter(\n (conv_list): ModuleList(\n (0): SampaddingConv1D_BN(\n (padding): ConstantPad1d(padding=(0, 0), value=0)\n (conv1d): Conv1d(681, 510, kernel_size=(1,), stride=(1,))\n (bn): BatchNorm1d(510, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): SampaddingConv1D_BN(\n (padding): ConstantPad1d(padding=(0, 1), value=0)\n (conv1d): Conv1d(681, 510, kernel_size=(2,), stride=(1,))\n (bn): BatchNorm1d(510, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n )\n )\n (gap): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Flatten(full=False)\n )\n (hidden): Linear(in_features=1020, out_features=2, bias=True)\n)", + "crumbs": [ + "Models", + "CNNs", + "OmniScaleCNN" + ] + }, + { + "objectID": "callback.core.html", + "href": "callback.core.html", + "title": "Callback", + "section": "", + "text": "Miscellaneous callbacks for timeseriesAI.", + "crumbs": [ + "Training", + "Callbacks", + "Callback" + ] + }, + { + "objectID": "callback.core.html#events", + "href": "callback.core.html#events", + "title": "Callback", + "section": "Events", + "text": "Events\nA callback can implement actions on the following events: * before_fit: called before doing anything, ideal for initial setup. * before_epoch: called at the beginning of each epoch, useful for any behavior you need to reset at each epoch. * before_train: called at the beginning of the training part of an epoch. * before_batch: called at the beginning of each batch, just after drawing said batch. It can be used to do any setup necessary for the batch (like hyper-parameter scheduling) or to change the input/target before it goes in the model (change of the input with techniques like mixup for instance). * after_pred: called after computing the output of the model on the batch. It can be used to change that output before it’s fed to the loss. * after_loss: called after the loss has been computed, but before the backward pass. It can be used to add any penalty to the loss (AR or TAR in RNN training for instance). 
* before_backward: called after the loss has been computed, but only in training mode (i.e. when the backward pass will be used) * after_backward: called after the backward pass, but before the update of the parameters. It can be used to do any change to the gradients before said update (gradient clipping for instance). * after_step: called after the step and before the gradients are zeroed. * after_batch: called at the end of a batch, for any clean-up before the next one. * after_train: called at the end of the training phase of an epoch. * before_validate: called at the beginning of the validation phase of an epoch, useful for any setup needed specifically for validation. * after_validate: called at the end of the validation part of an epoch. * after_epoch: called at the end of an epoch, for any clean-up before the next one. * after_fit: called at the end of training, for final clean-up.", + "crumbs": [ + "Training", + "Callbacks", + "Callback" + ] + }, + { + "objectID": "callback.core.html#learner-attributes", + "href": "callback.core.html#learner-attributes", + "title": "Callback", + "section": "Learner attributes", + "text": "Learner attributes\nWhen writing a callback, the following attributes of Learner are available:\n\nmodel: the model used for training/validation\ndata: the underlying DataLoaders\nloss_func: the loss function used\nopt: the optimizer used to udpate the model parameters\nopt_func: the function used to create the optimizer\ncbs: the list containing all Callbacks\ndl: current DataLoader used for iteration\nx/xb: last input drawn from self.dl (potentially modified by callbacks). xb is always a tuple (potentially with one element) and x is detuplified. You can only assign to xb.\ny/yb: last target drawn from self.dl (potentially modified by callbacks). yb is always a tuple (potentially with one element) and y is detuplified. You can only assign to yb.\npred: last predictions from self.model (potentially modified by callbacks)\nloss: last computed loss (potentially modified by callbacks)\nn_epoch: the number of epochs in this training\nn_iter: the number of iterations in the current self.dl\nepoch: the current epoch index (from 0 to n_epoch-1)\niter: the current iteration index in self.dl (from 0 to n_iter-1)\n\nThe following attributes are added by TrainEvalCallback and should be available unless you went out of your way to remove that callback: * train_iter: the number of training iterations done since the beginning of this training * pct_train: from 0. 
to 1., the percentage of training iterations completed * training: flag to indicate if we’re in training mode or not\nThe following attribute is added by Recorder and should be available unless you went out of your way to remove that callback: * smooth_loss: an exponentially-averaged version of the training loss", + "crumbs": [ + "Training", + "Callbacks", + "Callback" + ] + }, + { + "objectID": "callback.core.html#transform-scheduler", + "href": "callback.core.html#transform-scheduler", + "title": "Callback", + "section": "Transform scheduler", + "text": "Transform scheduler\n\nsource\n\nTransformScheduler\n\n TransformScheduler (schedule_func:<built-infunctioncallable>,\n show_plot:bool=False)\n\nA callback to schedule batch transforms during training based on a function (sched_lin, sched_exp, sched_cos (default), etc)\n\nTransformScheduler(SchedCos(1, 0))\n\nTransformScheduler(<fastai.callback.schedule._Annealer object>)\n\n\n\np = torch.linspace(0.,1,100)\nf = combine_scheds([0.3, 0.4, 0.3], [SchedLin(1.,1.), SchedCos(1.,0.), SchedLin(0.,.0), ])\nplt.plot(p, [f(o) for o in p]);\n\n\n\n\n\n\n\n\n\np = torch.linspace(0.,1,100)\nf = combine_scheds([0.3, 0.7], [SchedCos(0.,1.), SchedCos(1.,0.)])\nplt.plot(p, [f(o) for o in p]);", + "crumbs": [ + "Training", + "Callbacks", + "Callback" + ] + }, + { + "objectID": "callback.core.html#showgraph", + "href": "callback.core.html#showgraph", + "title": "Callback", + "section": "ShowGraph", + "text": "ShowGraph\n\nsource\n\nShowGraph\n\n ShowGraph (plot_metrics:bool=True, final_losses:bool=True,\n perc:float=0.5)\n\n(Modified) Update a graph of training and validation loss", + "crumbs": [ + "Training", + "Callbacks", + "Callback" + ] + }, + { + "objectID": "callback.core.html#savemodel", + "href": "callback.core.html#savemodel", + "title": "Callback", + "section": "SaveModel", + "text": "SaveModel\n\nsource\n\nSaveModel\n\n SaveModel (monitor='valid_loss', comp=None, min_delta=0.0, fname='model',\n every_epoch=False, at_end=False, with_opt=False,\n reset_on_fit=True, verbose=False)\n\nA TrackerCallback that saves the model’s best during training and loads it at the end with a verbose option.", + "crumbs": [ + "Training", + "Callbacks", + "Callback" + ] + }, + { + "objectID": "models.fcnplus.html", + "href": "models.fcnplus.html", + "title": "FCNPlus", + "section": "", + "text": "This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co\n\n\nsource\n\nFCNPlus\n\n FCNPlus (c_in, c_out, layers=[128, 256, 128], kss=[7, 5, 3], coord=False,\n separable=False, use_bn=False, fc_dropout=0.0, zero_norm=False,\n act=<class 'torch.nn.modules.activation.ReLU'>, act_kwargs={},\n residual=False, custom_head=None)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nxb = torch.rand(16, 3, 10)\ntest_eq(FCNPlus(3, 2)(xb).shape, [xb.shape[0], 2])\ntest_eq(FCNPlus(3, 2, coord=True, separable=True, act=Swish, residual=True)(xb).shape, [xb.shape[0], 2])\ntest_eq(nn.Sequential(*FCNPlus(3, 2).children())(xb).shape, [xb.shape[0], 2])\ntest_eq(FCNPlus(3, 2, custom_head=partial(mlp_head, seq_len=10))(xb).shape, [xb.shape[0], 2])\n\n\nfrom tsai.models.utils import *\n\n\nmodel = build_ts_model(FCNPlus, 2, 3)\nmodel[-1]\n\nSequential(\n (0): AdaptiveAvgPool1d(output_size=1)\n (1): Squeeze(dim=-1)\n (2): Linear(in_features=128, out_features=3, bias=True)\n)\n\n\n\nfrom tsai.models.FCN import *\n\n\ntest_eq(count_parameters(FCN(3,2)), count_parameters(FCNPlus(3,2)))\n\n\nFCNPlus(3,2)\n\nFCNPlus(\n (backbone): _FCNBlockPlus(\n (convblock1): ConvBlock(\n (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock2): ConvBlock(\n (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)\n (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (convblock3): ConvBlock(\n (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (add): Sequential()\n )\n (head): Sequential(\n (0): AdaptiveAvgPool1d(output_size=1)\n (1): Squeeze(dim=-1)\n (2): Linear(in_features=128, out_features=2, bias=True)\n )\n)", + "crumbs": [ + "Models", + "CNNs", + "FCNPlus" + ] + }, + { + "objectID": "models.resnetplus.html", + "href": "models.resnetplus.html", + "title": "ResNetPlus", + "section": "", + "text": "This is an unofficial PyTorch implementation created by Ignacio Oguiza - 
oguiza@timeseriesAI.co\n\n\nsource\n\nResNetPlus\n\n ResNetPlus (c_in, c_out, seq_len=None, nf=64, sa=False, se=None,\n fc_dropout=0.0, concat_pool=False, flatten=False,\n custom_head=None, y_range=None, ks=[7, 5, 3], coord=False,\n separable=False, bn_1st=True, zero_norm=False, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={})\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nResBlockPlus\n\n ResBlockPlus (ni, nf, ks=[7, 5, 3], coord=False, separable=False,\n bn_1st=True, zero_norm=False, sa=False, se=None, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={})\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nfrom tsai.models.layers import Swish\n\n\nxb = torch.rand(2, 3, 4)\ntest_eq(ResNetPlus(3,2)(xb).shape, [xb.shape[0], 2])\ntest_eq(ResNetPlus(3,2,coord=True, separable=True, bn_1st=False, zero_norm=True, act=Swish, act_kwargs={}, fc_dropout=0.5)(xb).shape, [xb.shape[0], 2])\ntest_eq(count_parameters(ResNetPlus(3, 2)), 479490) # for (3,2)\n\n\nfrom tsai.models.ResNet import *\n\n\ntest_eq(count_parameters(ResNet(3, 2)), count_parameters(ResNetPlus(3, 2))) # for (3,2)\n\n\nm = ResNetPlus(3, 2, zero_norm=True, coord=True, separable=True)\nprint('n_params:', count_parameters(m))\nprint(m)\nprint(check_weight(m, is_bn)[0])\n\nn_params: 114820\nResNetPlus(\n (backbone): Sequential(\n (0): ResBlockPlus(\n (convblock1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(4, 4, kernel_size=(7,), stride=(1,), padding=(3,), groups=4, bias=False)\n (pointwise_conv): Conv1d(4, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (convblock2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(5,), stride=(1,), padding=(2,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (convblock3): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(3,), stride=(1,), padding=(1,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (shortcut): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(4, 64, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (add): Add\n (act): ReLU()\n )\n (1): ResBlockPlus(\n (convblock1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(7,), stride=(1,), padding=(3,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (convblock2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(5,), stride=(1,), padding=(2,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (convblock3): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(3,), stride=(1,), padding=(1,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(128, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True)\n )\n (shortcut): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(65, 128, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (add): Add\n (act): ReLU()\n )\n (2): ResBlockPlus(\n (convblock1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(7,), stride=(1,), padding=(3,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (convblock2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(5,), stride=(1,), padding=(2,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (convblock3): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(3,), stride=(1,), padding=(1,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (shortcut): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (add): Add\n (act): ReLU()\n )\n )\n (head): Sequential(\n (0): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n (1): Linear(in_features=128, out_features=2, bias=True)\n )\n)\n[1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 0. 1.]", + "crumbs": [ + "Models", + "CNNs", + "ResNetPlus" + ] + }, + { + "objectID": "callback.experimental.html", + "href": "callback.experimental.html", + "title": "Experimental Callbacks", + "section": "", + "text": "Miscellaneous experimental callbacks for timeseriesAI.", + "crumbs": [ + "Training", + "Callbacks", + "Experimental Callbacks" + ] + }, + { + "objectID": "callback.experimental.html#gamblers-loss-noisy-labels", + "href": "callback.experimental.html#gamblers-loss-noisy-labels", + "title": "Experimental Callbacks", + "section": "Gambler’s loss: noisy labels", + "text": "Gambler’s loss: noisy labels\n\nsource\n\ngambler_loss\n\n gambler_loss (reward=2)\n\n\nsource\n\n\nGamblersCallback\n\n GamblersCallback (after_create=None, before_fit=None, before_epoch=None,\n before_train=None, before_batch=None, after_pred=None,\n after_loss=None, before_backward=None,\n after_cancel_backward=None, after_backward=None,\n before_step=None, after_cancel_step=None,\n after_step=None, after_cancel_batch=None,\n after_batch=None, after_cancel_train=None,\n after_train=None, before_validate=None,\n after_cancel_validate=None, after_validate=None,\n after_cancel_epoch=None, after_epoch=None,\n after_cancel_fit=None, after_fit=None)\n\nA callback to use metrics with gambler’s loss\n\nfrom tsai.data.external import *\nfrom tsai.data.core import *\nfrom tsai.models.InceptionTime import *\nfrom tsai.models.layers import *\nfrom tsai.learner import *\nfrom fastai.metrics import *\nfrom tsai.metrics import *\n\n\nX, y, splits = get_UCR_data('NATOPS', return_split=False)\ntfms = [None, TSCategorize()]\ndsets = TSDatasets(X, y, tfms=tfms, splits=splits)\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[64, 128])\nloss_func = gambler_loss()\nlearn = ts_learner(dls, 
InceptionTime(dls.vars, dls.c + 1), loss_func=loss_func, cbs=GamblersCallback, metrics=[accuracy])\nlearn.fit_one_cycle(1)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n1.840055\n1.945397\n0.166667\n00:05", + "crumbs": [ + "Training", + "Callbacks", + "Experimental Callbacks" + ] + }, + { + "objectID": "callback.experimental.html#uncertainty-based-data-augmentation", + "href": "callback.experimental.html#uncertainty-based-data-augmentation", + "title": "Experimental Callbacks", + "section": "Uncertainty-based data augmentation", + "text": "Uncertainty-based data augmentation\n\nsource\n\nUBDAug\n\n UBDAug (batch_tfms:list, N:int=2, C:int=4, S:int=1)\n\nA callback to implement the uncertainty-based data augmentation.\n\nfrom tsai.models.utils import *\n\n\nX, y, splits = get_UCR_data('NATOPS', return_split=False)\ntfms = [None, TSCategorize()]\ndsets = TSDatasets(X, y, tfms=tfms, splits=splits)\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, batch_tfms=[TSStandardize()])\nmodel = build_ts_model(InceptionTime, dls=dls)\nTS_tfms = [TSMagScale(.75, p=.5), TSMagWarp(.1, p=0.5), TSWindowWarp(.25, p=.5), \n TSSmooth(p=0.5), TSRandomResizedCrop(.1, p=.5), \n TSRandomCropPad(.3, p=0.5), \n TSMagAddNoise(.5, p=.5)]\n\nubda_cb = UBDAug(TS_tfms, N=2, C=4, S=2)\nlearn = ts_learner(dls, model, cbs=ubda_cb, metrics=accuracy)\nlearn.fit_one_cycle(1)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n1.817080\n1.791119\n0.077778\n00:14", + "crumbs": [ + "Training", + "Callbacks", + "Experimental Callbacks" + ] + }, + { + "objectID": "models.transformermodel.html", + "href": "models.transformermodel.html", + "title": "TransformerModel", + "section": "", + "text": "This is an unofficial PyTorch implementation created by Ignacio Oguiza - oguiza@timeseriesAI.co\n\n\nsource\n\nTransformerModel\n\n TransformerModel (c_in, c_out, d_model=64, n_head=1, d_ffn=128,\n dropout=0.1, activation='relu', n_layers=1)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 16\nnvars = 3\nseq_len = 96\nc_out = 2\nxb = torch.rand(bs, nvars, seq_len)\n\n\nmodel = TransformerModel(nvars, c_out, d_model=64, n_head=1, d_ffn=128, dropout=0.1, activation='gelu', n_layers=3)\ntest_eq(model(xb).shape, [bs, c_out])\nprint(count_parameters(model))\nmodel\n\n100930\n\n\nTransformerModel(\n (permute): Permute(dims=2, 0, 1)\n (inlinear): Linear(in_features=3, out_features=64, bias=True)\n (relu): ReLU()\n (transformer_encoder): TransformerEncoder(\n (layers): ModuleList(\n (0): TransformerEncoderLayer(\n (self_attn): MultiheadAttention(\n (out_proj): NonDynamicallyQuantizableLinear(in_features=64, out_features=64, bias=True)\n )\n (linear1): Linear(in_features=64, out_features=128, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n (linear2): Linear(in_features=128, out_features=64, bias=True)\n (norm1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n (norm2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n (dropout1): Dropout(p=0.1, inplace=False)\n (dropout2): Dropout(p=0.1, inplace=False)\n )\n (1): TransformerEncoderLayer(\n (self_attn): MultiheadAttention(\n (out_proj): NonDynamicallyQuantizableLinear(in_features=64, out_features=64, bias=True)\n )\n (linear1): Linear(in_features=64, out_features=128, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n (linear2): Linear(in_features=128, out_features=64, bias=True)\n (norm1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n (norm2): LayerNorm((64,), eps=1e-05, 
elementwise_affine=True)\n (dropout1): Dropout(p=0.1, inplace=False)\n (dropout2): Dropout(p=0.1, inplace=False)\n )\n (2): TransformerEncoderLayer(\n (self_attn): MultiheadAttention(\n (out_proj): NonDynamicallyQuantizableLinear(in_features=64, out_features=64, bias=True)\n )\n (linear1): Linear(in_features=64, out_features=128, bias=True)\n (dropout): Dropout(p=0.1, inplace=False)\n (linear2): Linear(in_features=128, out_features=64, bias=True)\n (norm1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n (norm2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n (dropout1): Dropout(p=0.1, inplace=False)\n (dropout2): Dropout(p=0.1, inplace=False)\n )\n )\n (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n )\n (transpose): Transpose(1, 0)\n (max): Max(dim=1, keepdim=False)\n (outlinear): Linear(in_features=64, out_features=2, bias=True)\n)", + "crumbs": [ + "Models", + "Transformers", + "TransformerModel" + ] + }, + { + "objectID": "index.html#description", + "href": "index.html#description", + "title": "tsai", + "section": "Description", + "text": "Description\n\nState-of-the-art Deep Learning library for Time Series and Sequences.\n\ntsai is an open-source deep learning package built on top of Pytorch & fastai focused on state-of-the-art techniques for time series tasks like classification, regression, forecasting, imputation…\ntsai is currently under active development by timeseriesAI.", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#whats-new", + "href": "index.html#whats-new", + "title": "tsai", + "section": "What’s new:", + "text": "What’s new:\nDuring the last few releases, here are some of the most significant additions to tsai:\n\nNew models: PatchTST (Accepted by ICLR 2023), RNN with Attention (RNNAttention, LSTMAttention, GRUAttention), TabFusionTransformer, …\nNew datasets: we have increased the number of datasets you can download using tsai:\n\n128 univariate classification datasets\n30 multivariate classification datasets\n15 regression datasets\n62 forecasting datasets\n9 long term forecasting datasets\n\nNew tutorials: PatchTST. Based on some of your requests, we are planning to release additional tutorials on data preparation and forecasting.\nNew functionality: sklearn-type pipeline transforms, walk-foward cross validation, reduced RAM requirements, and a lot of new functionality to perform more accurate time series forecasts.\nPytorch 2.0 support.", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#installation", + "href": "index.html#installation", + "title": "tsai", + "section": "Installation", + "text": "Installation\n\nPip install\nYou can install the latest stable version from pip using:\npip install tsai\nIf you plan to develop tsai yourself, or want to be on the cutting edge, you can use an editable install. First install PyTorch, and then:\ngit clone https://github.com/timeseriesAI/tsai\npip install -e \"tsai[dev]\"\nNote: starting with tsai 0.3.0 tsai will only install hard dependencies. Other soft dependencies (which are only required for selected tasks) will not be installed by default (this is the recommended approach. If you require any of the dependencies that is not installed, tsai will ask you to install it when necessary). 
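As a quick sanity check after installation, the following minimal sketch verifies the package is importable; `my_setup()` is an assumption about the utilities available in your installed version and can be skipped.

import tsai
print('tsai version:', tsai.__version__)
# from tsai.all import *
# my_setup()   # assumed helper that reports python / torch / tsai versions and available devices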
If you still want to install tsai with all its dependencies you can do it by running:\npip install tsai[extras]\n\n\nConda install\nYou can also install tsai using conda (note that if you replace conda with mamba the install process will be much faster and more reliable):\nconda install -c timeseriesai tsai", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#documentation", + "href": "index.html#documentation", + "title": "tsai", + "section": "Documentation", + "text": "Documentation\nHere’s the link to the documentation.", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#available-models", + "href": "index.html#available-models", + "title": "tsai", + "section": "Available models:", + "text": "Available models:\nHere’s a list with some of the state-of-the-art models available in tsai:\n\nLSTM (Hochreiter, 1997) (paper)\nGRU (Cho, 2014) (paper)\nMLP - Multilayer Perceptron (Wang, 2016) (paper)\nFCN - Fully Convolutional Network (Wang, 2016) (paper)\nResNet - Residual Network (Wang, 2016) (paper)\nLSTM-FCN (Karim, 2017) (paper)\nGRU-FCN (Elsayed, 2018) (paper)\nmWDN - Multilevel wavelet decomposition network (Wang, 2018) (paper)\nTCN - Temporal Convolutional Network (Bai, 2018) (paper)\nMLSTM-FCN - Multivariate LSTM-FCN (Karim, 2019) (paper)\nInceptionTime (Fawaz, 2019) (paper)\nRocket (Dempster, 2019) (paper)\nXceptionTime (Rahimian, 2019) (paper)\nResCNN - 1D-ResCNN (Zou , 2019) (paper)\nTabModel - modified from fastai’s TabularModel\nOmniScale - Omni-Scale 1D-CNN (Tang, 2020) (paper)\nTST - Time Series Transformer (Zerveas, 2020) (paper)\nTabTransformer (Huang, 2020) (paper)\nTSiT Adapted from ViT (Dosovitskiy, 2020) (paper)\nMiniRocket (Dempster, 2021) (paper)\nXCM - An Explainable Convolutional Neural Network (Fauvel, 2021) (paper)\ngMLP - Gated Multilayer Perceptron (Liu, 2021) (paper)\nTSPerceiver - Adapted from Perceiver IO (Jaegle, 2021) (paper)\nGatedTabTransformer (Cholakov, 2022) (paper)\nTSSequencerPlus - Adapted from Sequencer (Tatsunami, 2022) (paper)\nPatchTST - (Nie, 2022) (paper)\n\nplus other custom models like: TransformerModel, LSTMAttention, GRUAttention, …", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#how-to-start-using-tsai", + "href": "index.html#how-to-start-using-tsai", + "title": "tsai", + "section": "How to start using tsai?", + "text": "How to start using tsai?\nTo get to know the tsai package, we’d suggest you start with this notebook in Google Colab: 01_Intro_to_Time_Series_Classification It provides an overview of a time series classification task.\nWe have also develop many other tutorial notebooks.\nTo use tsai in your own notebooks, the only thing you need to do after you have installed the package is to run this:\nfrom tsai.all import *", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#examples", + "href": "index.html#examples", + "title": "tsai", + "section": "Examples", + "text": "Examples\nThese are just a few examples of how you can use tsai:\n\nBinary, univariate classification\nTraining:\nfrom tsai.basics import *\n\nX, y, splits = get_classification_data('ECG200', split_data=False)\ntfms = [None, TSClassification()]\nbatch_tfms = TSStandardize()\nclf = TSClassifier(X, y, splits=splits, path='models', arch=\"InceptionTimePlus\", tfms=tfms, batch_tfms=batch_tfms, metrics=accuracy, cbs=ShowGraph())\nclf.fit_one_cycle(100, 3e-4)\nclf.export(\"clf.pkl\") \nInference:\nfrom tsai.inference import load_learner\n\nclf = load_learner(\"models/clf.pkl\")\nprobas, target, preds = 
clf.get_X_preds(X[splits[1]], y[splits[1]])\n\n\nMulti-class, multivariate classification\nTraining:\nfrom tsai.basics import *\n\nX, y, splits = get_classification_data('LSST', split_data=False)\ntfms = [None, TSClassification()]\nbatch_tfms = TSStandardize(by_sample=True)\nmv_clf = TSClassifier(X, y, splits=splits, path='models', arch=\"InceptionTimePlus\", tfms=tfms, batch_tfms=batch_tfms, metrics=accuracy, cbs=ShowGraph())\nmv_clf.fit_one_cycle(10, 1e-2)\nmv_clf.export(\"mv_clf.pkl\")\nInference:\nfrom tsai.inference import load_learner\n\nmv_clf = load_learner(\"models/mv_clf.pkl\")\nprobas, target, preds = mv_clf.get_X_preds(X[splits[1]], y[splits[1]])\n\n\nMultivariate Regression\nTraining:\nfrom tsai.basics import *\n\nX, y, splits = get_regression_data('AppliancesEnergy', split_data=False)\ntfms = [None, TSRegression()]\nbatch_tfms = TSStandardize(by_sample=True)\nreg = TSRegressor(X, y, splits=splits, path='models', arch=\"TSTPlus\", tfms=tfms, batch_tfms=batch_tfms, metrics=rmse, cbs=ShowGraph(), verbose=True)\nreg.fit_one_cycle(100, 3e-4)\nreg.export(\"reg.pkl\")\nInference:\nfrom tsai.inference import load_learner\n\nreg = load_learner(\"models/reg.pkl\")\nraw_preds, target, preds = reg.get_X_preds(X[splits[1]], y[splits[1]])\nThe ROCKETs (RocketClassifier, RocketRegressor, MiniRocketClassifier, MiniRocketRegressor, MiniRocketVotingClassifier or MiniRocketVotingRegressor) are somewhat different models. They are not actually deep learning models (although they use convolutions) and are used in a different way.\n⚠️ You’ll also need to install sktime to be able to use them. You can install it separately:\npip install sktime\nor use:\npip install tsai[extras]\nTraining:\nfrom sklearn.metrics import mean_squared_error, make_scorer\nfrom tsai.data.external import get_Monash_regression_data\nfrom tsai.models.MINIROCKET import MiniRocketRegressor\n\nX_train, y_train, *_ = get_Monash_regression_data('AppliancesEnergy')\nrmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)\nreg = MiniRocketRegressor(scoring=rmse_scorer)\nreg.fit(X_train, y_train)\nreg.save('MiniRocketRegressor')\nInference:\nfrom sklearn.metrics import mean_squared_error\nfrom tsai.data.external import get_Monash_regression_data\nfrom tsai.models.MINIROCKET import load_minirocket\n\n*_, X_test, y_test = get_Monash_regression_data('AppliancesEnergy')\nreg = load_minirocket('MiniRocketRegressor')\ny_pred = reg.predict(X_test)\nmean_squared_error(y_test, y_pred, squared=False)\n\n\nForecasting\nYou can use tsai for forecast in the following scenarios:\n\nunivariate or multivariate time series input\nunivariate or multivariate time series output\nsingle or multi-step ahead\n\nYou’ll need to: * prepare X (time series input) and the target y (see documentation) * select PatchTST or one of tsai’s models ending in Plus (TSTPlus, InceptionTimePlus, TSiTPlus, etc). 
The model will auto-configure a head to yield an output with the same shape as the target input y.\n\nSingle step\nTraining:\nfrom tsai.basics import *\n\nts = get_forecasting_time_series(\"Sunspots\").values\nX, y = SlidingWindow(60, horizon=1)(ts)\nsplits = TimeSplitter(235)(y) \ntfms = [None, TSForecasting()]\nbatch_tfms = TSStandardize()\nfcst = TSForecaster(X, y, splits=splits, path='models', tfms=tfms, batch_tfms=batch_tfms, bs=512, arch=\"TSTPlus\", metrics=mae, cbs=ShowGraph())\nfcst.fit_one_cycle(50, 1e-3)\nfcst.export(\"fcst.pkl\")\nInference:\nfrom tsai.inference import load_learner\n\nfcst = load_learner(\"models/fcst.pkl\", cpu=False)\nraw_preds, target, preds = fcst.get_X_preds(X[splits[1]], y[splits[1]])\nraw_preds.shape\n# torch.Size([235, 1])\n\n\nMulti-step\nThis example show how to build a 3-step ahead univariate forecast.\nTraining:\nfrom tsai.basics import *\n\nts = get_forecasting_time_series(\"Sunspots\").values\nX, y = SlidingWindow(60, horizon=3)(ts)\nsplits = TimeSplitter(235, fcst_horizon=3)(y) \ntfms = [None, TSForecasting()]\nbatch_tfms = TSStandardize()\nfcst = TSForecaster(X, y, splits=splits, path='models', tfms=tfms, batch_tfms=batch_tfms, bs=512, arch=\"TSTPlus\", metrics=mae, cbs=ShowGraph())\nfcst.fit_one_cycle(50, 1e-3)\nfcst.export(\"fcst.pkl\")\nInference:\nfrom tsai.inference import load_learner\nfcst = load_learner(\"models/fcst.pkl\", cpu=False)\nraw_preds, target, preds = fcst.get_X_preds(X[splits[1]], y[splits[1]])\nraw_preds.shape\n# torch.Size([235, 3])", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#input-data-format", + "href": "index.html#input-data-format", + "title": "tsai", + "section": "Input data format", + "text": "Input data format\nThe input format for all time series models and image models in tsai is the same. An np.ndarray (or array-like object like zarr, etc) with 3 dimensions:\n[# samples x # variables x sequence length]\nThe input format for tabular models in tsai (like TabModel, TabTransformer and TabFusionTransformer) is a pandas dataframe. See example.", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#how-to-contribute-to-tsai", + "href": "index.html#how-to-contribute-to-tsai", + "title": "tsai", + "section": "How to contribute to tsai?", + "text": "How to contribute to tsai?\nWe welcome contributions of all kinds. Development of enhancements, bug fixes, documentation, tutorial notebooks, …\nWe have created a guide to help you start contributing to tsai. You can read it here.", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#enterprise-support-and-consulting-services", + "href": "index.html#enterprise-support-and-consulting-services", + "title": "tsai", + "section": "Enterprise support and consulting services:", + "text": "Enterprise support and consulting services:\nWant to make the most out of timeseriesAI/tsai in a professional setting? Let us help. 
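Tying back to the "Input data format" section above, here is a hedged sketch that builds a toy dataset in the expected 3-D layout ([# samples x # variables x sequence length]) and wraps it in tsai dataloaders. The random values and sizes are illustrative only, and it is assumed that tsai.all re-exports get_splits, get_ts_dls and TSClassification as used elsewhere in this document.

import numpy as np
from tsai.all import *

X = np.random.rand(100, 3, 50).astype('float32')   # 100 samples, 3 variables, 50 time steps
y = np.random.randint(0, 2, 100)                    # one target per sample
splits = get_splits(y)                              # train/valid split, as used elsewhere in these docs
dls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()])
xb, yb = dls.train.one_batch()
print(xb.shape)   # -> (batch size, 3 variables, 50 time steps)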
Send us an email to learn more: info@timeseriesai.co", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "index.html#citing-tsai", + "href": "index.html#citing-tsai", + "title": "tsai", + "section": "Citing tsai", + "text": "Citing tsai\nIf you use tsai in your research please use the following BibTeX entry:\n@Misc{tsai,\n author = {Ignacio Oguiza},\n title = {tsai - A state-of-the-art deep learning library for time series and sequential data},\n howpublished = {Github},\n year = {2023},\n url = {https://github.com/timeseriesAI/tsai}\n}", + "crumbs": [ + "tsai" + ] + }, + { + "objectID": "data.image.html", + "href": "data.image.html", + "title": "Imaging Time Series", + "section": "", + "text": "Main functions used to transform time series into TSImage tensors.\n\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, return_split=False)\n\n\nsource\n\nToTSImage\n\n ToTSImage (enc=None, dec=None, split_idx=None, order=None)\n\nDelegates (__call__,decode,setup) to (encodes,decodes,setups) if split_idx matches\n\nsource\n\n\nTSImage\n\n TSImage (x, **kwargs)\n\nA Tensor which support subclass pickling, and maintains metadata when casting or after methods\n\nsource\n\n\nTSToPlot\n\n TSToPlot (size:Optional[int]=224, dpi:int=100, lw=1, **kwargs)\n\nTransforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by creating a matplotlib plot.\n\nout = TSToPlot()(TSTensor(X[:2]), split_idx=0)\nprint(out.shape)\nout[0].show()\n\ntorch.Size([2, 3, 224, 224])\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nTSToMat\n\n TSToMat (size=224, dpi=100, cmap=None, **kwargs)\n\nTransforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by creating a matplotlib matrix. Input data must be normalized with a range(-1, 1)\n\nout = TSToMat()(TSTensor(X[:2]), split_idx=0)\nprint(out.shape)\nout[0].show()\n\ntorch.Size([2, 3, 224, 224])\n\n\n\n\n\n\n\n\n\n\nout = TSToMat(cmap='spring')(TSTensor(X[:2]), split_idx=0)\nprint(out.shape)\nout[0].show()\n\ntorch.Size([2, 3, 224, 224])\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nTSToJRP\n\n TSToJRP (size=224, cmap=None, dimension=1, time_delay=1, threshold=None,\n percentage=10)\n\nTransforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Joint Recurrence Plot\n\nsource\n\n\nTSToRP\n\n TSToRP (size=224, cmap=None, dimension=1, time_delay=1, threshold=None,\n percentage=10, flatten=False)\n\nTransforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Recurrence Plot. It requires input to be previously normalized between -1 and 1\n\nsource\n\n\nTSToMTF\n\n TSToMTF (size=224, cmap=None, n_bins=5, image_size=1.0,\n strategy='quantile', overlapping=False, flatten=False)\n\nTransforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Markov Transition Field\n\nsource\n\n\nTSToGASF\n\n TSToGASF (size=224, cmap=None, range=None, image_size=1.0,\n sample_range=(-1, 1), method='summation', overlapping=False,\n flatten=False)\n\nTransforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Gramian Angular Summation Field. It requires either input to be previously normalized between -1 and 1 or set range to (-1, 1)\n\nsource\n\n\nTSToGADF\n\n TSToGADF (size=224, cmap=None, range=None, image_size=1.0,\n sample_range=(-1, 1), method='summation', overlapping=False,\n flatten=False)\n\nTransforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Gramian Angular Difference Field. 
It requires either input to be previously normalized between -1 and 1 or set range to (-1, 1)\n\nout = TSToRP()(TSTensor(X[:2]), split_idx=0)\nprint(out.shape)\nout[0].show()\n\ntorch.Size([2, 24, 224, 224])\n\n\n\n\n\n\n\n\n\n\no = TSTensor(X[0][1][None])\nencoder = RecurrencePlot()\na = encoder.fit_transform(o.cpu().numpy())[0]\no = TSTensor(X[0])\nencoder = RecurrencePlot()\nb = encoder.fit_transform(o.cpu().numpy())[1]\ntest_eq(a,b) # channels can all be processed in parallel\n\n\ntest_eq(TSToRP()(TSTensor(X[0]), split_idx=False)[0], TSToRP()(TSTensor(X[0][0][None]), split_idx=False)[0])\ntest_eq(TSToRP()(TSTensor(X[0]), split_idx=False)[1], TSToRP()(TSTensor(X[0][1][None]), split_idx=False)[0])\ntest_eq(TSToRP()(TSTensor(X[0]), split_idx=False)[2], TSToRP()(TSTensor(X[0][2][None]), split_idx=False)[0])\n\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, return_split=False)\ntfms = [None, Categorize()]\nbts = [[TSNormalize(), TSToPlot(100)],\n [TSNormalize(), TSToMat(100)],\n [TSNormalize(), TSToGADF(100)],\n [TSNormalize(), TSToGASF(100)],\n [TSNormalize(), TSToMTF(100)],\n [TSNormalize(), TSToRP(100)]]\nbtns = ['Plot', 'Mat', 'GADF', 'GASF', 'MTF', 'RP']\ndsets = TSDatasets(X, y, tfms=tfms, splits=splits)\nfor i, (bt, btn) in enumerate(zip(bts, btns)):\n dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=8, batch_tfms=bt)\n test_eq(dls.vars, 3 if i <2 else X.shape[1])\n test_eq(dls.vars, 3 if i <2 else X.shape[1])\n test_eq(dls.len, (100,100))\n xb, yb = dls.train.one_batch()\n print(i, btn, xb, xb.dtype, xb.min(), xb.max())\n xb[0].show()\n plt.show()\n\n0 Plot TSImage(shape:torch.Size([8, 3, 100, 100])) torch.float32 0.054901961237192154 1.0\n1 Mat TSImage(shape:torch.Size([8, 3, 100, 100])) torch.float32 0.019607843831181526 1.0\n2 GADF TSImage(shape:torch.Size([8, 24, 100, 100])) torch.float32 2.980232238769531e-07 0.9999997019767761\n3 GASF TSImage(shape:torch.Size([8, 24, 100, 100])) torch.float32 0.0 0.938302218914032\n4 MTF TSImage(shape:torch.Size([8, 24, 100, 100])) torch.float32 0.0 1.0\n5 RP TSImage(shape:torch.Size([8, 24, 100, 100])) torch.float32 0.0 0.8106333613395691\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nThe simplest way to train a model using time series to image transforms is this:\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, return_split=False)\ntfms = [None, Categorize()]\nbatch_tfms = [TSNormalize(), TSToGADF(224)]\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\nlearn = tsimage_learner(dls, xresnet34)\nlearn.fit_one_cycle(10)", + "crumbs": [ + "Data", + "Imaging Time Series" + ] + }, + { + "objectID": "callback.mvp.html", + "href": "callback.mvp.html", + "title": "MVP (aka TSBERT)", + "section": "", + "text": "Self-Supervised Pretraining of Time Series Models\n\nMasked Value Predictor callback used to predict time series step values after a binary mask has been applied.\n\nsource\n\nself_mask\n\n self_mask (o)\n\n\nsource\n\n\ncreate_future_mask\n\n create_future_mask (o, r=0.15, sync=False)\n\n\nsource\n\n\ncreate_variable_mask\n\n create_variable_mask (o, r=0.15)\n\n\nsource\n\n\ncreate_subsequence_mask\n\n create_subsequence_mask (o, r=0.15, lm=3, stateful=True, sync=False)\n\n\nt = torch.rand(16, 3, 100)\nmask = create_subsequence_mask(t, sync=False)\ntest_eq(mask.shape, t.shape)\nmask = create_subsequence_mask(t, sync=True)\ntest_eq(mask.shape, t.shape)\nmask = create_variable_mask(t)\ntest_eq(mask.shape, t.shape)\nmask = 
create_future_mask(t)\ntest_eq(mask.shape, t.shape)\n\n\no = torch.randn(2, 3, 4)\no[o>.5] = np.nan\ntest_eq(torch.isnan(self_mask(o)).sum(), 0)\n\n\nt = torch.rand(16, 30, 100)\nmask = create_subsequence_mask(t, r=.15) # default settings\ntest_eq(mask.dtype, torch.bool)\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[0], cmap='cool')\nplt.title(f'sample 0 subsequence mask (sync=False) - default mean: {mask[0].float().mean().item():.3f}')\nplt.show()\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[1], cmap='cool')\nplt.title(f'sample 1 subsequence mask (sync=False) - default mean: {mask[1].float().mean().item():.3f}')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nt = torch.rand(16, 30, 100)\nmask = create_subsequence_mask(t, r=.5) # 50% of values masked\ntest_eq(mask.dtype, torch.bool)\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[0], cmap='cool')\nplt.title(f'sample 0 subsequence mask (r=.5) mean: {mask[0].float().mean().item():.3f}')\nplt.show()\n\n\n\n\n\n\n\n\n\nt = torch.rand(16, 30, 100)\nmask = create_subsequence_mask(t, lm=5) # average length of mask = 5 \ntest_eq(mask.dtype, torch.bool)\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[0], cmap='cool')\nplt.title(f'sample 0 subsequence mask (lm=5) mean: {mask[0].float().mean().item():.3f}')\nplt.show()\n\n\n\n\n\n\n\n\n\nt = torch.rand(16, 30, 100)\nmask = create_subsequence_mask(t, stateful=False) # individual time steps masked \ntest_eq(mask.dtype, torch.bool)\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[0], cmap='cool')\nplt.title(f'per sample subsequence mask (stateful=False) mean: {mask[0].float().mean().item():.3f}')\nplt.show()\n\n\n\n\n\n\n\n\n\nt = torch.rand(1, 30, 100)\nmask = create_subsequence_mask(t, sync=True) # all time steps masked simultaneously\ntest_eq(mask.dtype, torch.bool)\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[0], cmap='cool')\nplt.title(f'per sample subsequence mask (sync=True) mean: {mask[0].float().mean().item():.3f}')\nplt.show()\n\n\n\n\n\n\n\n\n\nt = torch.rand(1, 30, 100)\nmask = create_variable_mask(t) # masked variables\ntest_eq(mask.dtype, torch.bool)\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[0], cmap='cool')\nplt.title(f'per sample variable mask mean: {mask[0].float().mean().item():.3f}')\nplt.show()\n\n\n\n\n\n\n\n\n\nt = torch.rand(1, 30, 100)\nmask = create_future_mask(t, r=.15, sync=True) # masked steps\ntest_eq(mask.dtype, torch.bool)\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[0], cmap='cool')\nplt.title(f'future mask mean: {mask[0].float().mean().item():.3f}')\nplt.show()\n\n\n\n\n\n\n\n\n\nt = torch.rand(1, 30, 100)\nmask = create_future_mask(t, r=.15, sync=False) # masked steps\nmask = create_future_mask(t, r=.15, sync=True) # masked steps\ntest_eq(mask.dtype, torch.bool)\nplt.figure(figsize=(10, 3))\nplt.pcolormesh(mask[0], cmap='cool')\nplt.title(f'future mask mean: {mask[0].float().mean().item():.3f}')\nplt.show()\n\n\n\n\n\n\n\n\n\nsource\n\n\ncreate_mask\n\n create_mask (o, r=0.15, lm=3, stateful=True, sync=False,\n subsequence_mask=True, variable_mask=False,\n future_mask=False)\n\n\nsource\n\n\nMVP\n\n MVP (r:float=0.15, subsequence_mask:bool=True, lm:float=3.0,\n stateful:bool=True, sync:bool=False, variable_mask:bool=False,\n future_mask:bool=False, custom_mask:Optional=None,\n sel_vars:Optional[list]=None, nan_to_num:int=0,\n window_size:Optional[tuple]=None, dropout:float=0.1, crit:<built-\n infunctioncallable>=None, weights_path:Optional[str]=None,\n target_dir:str='./models/MVP', fname:str='model',\n save_best:bool=True, 
verbose:bool=False)\n\nBasic class handling tweaks of the training loop by changing a Learner in various events\n\n\nExperiments\n\nfrom tsai.data.external import get_UCR_data, check_data\nfrom tsai.data.preprocessing import TSStandardize, TSNan2Value\nfrom tsai.data.core import TSCategorize, get_ts_dls\nfrom tsai.learner import ts_learner\nfrom tsai.models.InceptionTimePlus import InceptionTimePlus\n\n\ndsid = 'MoteStrain'\nX, y, splits = get_UCR_data(dsid, split_data=False)\ncheck_data(X, y, splits, False)\nX[X<-1] = np.nan # This is to test the model works well even if nan values are passed through the dataloaders.\n\nX - shape: [1272 samples x 1 features x 84 timesteps] type: memmap dtype:float32 isnan: 0\ny - shape: (1272,) type: memmap dtype:<U1 n_classes: 2 (636 samples per class) ['1', '2'] isnan: False\nsplits - n_splits: 2 shape: [20, 1252] overlap: False\n\n\n\n# Pre-train\ntfms = [None, [TSCategorize()]]\nbatch_tfms = [TSStandardize(by_var=True)]\nunlabeled_dls = get_ts_dls(X, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\nlearn = ts_learner(unlabeled_dls, InceptionTimePlus, cbs=[MVP(fname=f'{dsid}', window_size=(.5, 1))]) # trained on variable window size\nlearn.fit_one_cycle(1, 3e-3)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\ntime\n\n\n\n\n0\n1.270972\n1.194974\n00:06\n\n\n\n\n\n\nlearn = ts_learner(unlabeled_dls, InceptionTimePlus, cbs=[MVP(weights_path=f'models/MVP/{dsid}.pth')])\nlearn.fit_one_cycle(1, 3e-3)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\ntime\n\n\n\n\n0\n0.837741\n1.200484\n00:07\n\n\n\n\n\n\nlearn.MVP.show_preds(sharey=True) # these preds are highly inaccurate as the model's been trained for just 1 epoch for testing purposes\n\n\n\n\n\n\n\n\n\n# Fine-tune\ntfms = [None, [TSCategorize()]]\nbatch_tfms = [TSStandardize(by_var=True), TSNan2Value()]\nlabeled_dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=64)\nlearn = ts_learner(labeled_dls, InceptionTimePlus, pretrained=True, weights_path=f'models/MVP/{dsid}.pth', metrics=accuracy)\nlearn.fit_one_cycle(1)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n0.773015\n0.744267\n0.460863\n00:09\n\n\n\n\n\n\ntfms = [None, [TSCategorize()]]\nbatch_tfms = [TSStandardize(by_var=True), TSNan2Value()]\nunlabeled_dls = get_ts_dls(X, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=64)\nfname = f'{dsid}_test'\nmvp = MVP(subsequence_mask=True, sync='random', variable_mask=True, future_mask=True, fname=fname)\nlearn = ts_learner(unlabeled_dls, InceptionTimePlus, metrics=accuracy, cbs=mvp) # Metrics will not be used!\n\n/Users/nacho/opt/anaconda3/envs/py37torch113/lib/python3.7/site-packages/ipykernel_launcher.py:42: UserWarning: Only future_mask will be used\n\n\n\ntfms = [None, [TSCategorize()]]\nbatch_tfms = [TSStandardize(by_var=True)]\nunlabeled_dls = get_ts_dls(X, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=64)\nfname = f'{dsid}_test'\nmvp = MVP(subsequence_mask=True, sync='random', variable_mask=True, future_mask=True, custom_mask=partial(create_future_mask, r=.15),\n fname=fname)\nlearn = ts_learner(unlabeled_dls, InceptionTimePlus, metrics=accuracy, cbs=mvp) # Metrics will not be used!\n\n/Users/nacho/opt/anaconda3/envs/py37torch113/lib/python3.7/site-packages/ipykernel_launcher.py:40: UserWarning: Only custom_mask will be used\n\n\n\ntry: os.remove(\"models/MVP/MoteStrain.pth\")\nexcept OSError: pass\ntry: os.remove(\"models/MVP/model.pth\")\nexcept OSError: pass", + "crumbs": [ + "Training", + "Callbacks", + "MVP (aka TSBERT)" + ] + }, + { + "objectID": 
"models.xceptiontimeplus.html", + "href": "models.xceptiontimeplus.html", + "title": "XceptionTimePlus", + "section": "", + "text": "This is an unofficial PyTorch implementation by Ignacio Oguiza - oguiza@timeseriesAI.co modified on:\nFawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J. & Petitjean, F. (2019). InceptionTime: Finding AlexNet for Time Series Classification. arXiv preprint arXiv:1909.04939.\nOfficial InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime\n\nsource\n\nXceptionTimePlus\n\n XceptionTimePlus (c_in, c_out, seq_len=None, nf=16, nb_filters=None,\n coord=False, norm='Batch', concat_pool=False,\n adaptive_size=50, custom_head=None, residual=True,\n zero_norm=False, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={})\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nXceptionBlockPlus\n\n XceptionBlockPlus (ni, nf, residual=True, coord=False, norm='Batch',\n zero_norm=False, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={},\n ks=40, kss=None, bottleneck=True, separable=True,\n bn_1st=True, norm_act=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nXceptionModulePlus\n\n XceptionModulePlus (ni, nf, ks=40, kss=None, bottleneck=True,\n coord=False, separable=True, norm='Batch',\n zero_norm=False, bn_1st=True, act=<class\n 'torch.nn.modules.activation.ReLU'>, act_kwargs={},\n norm_act=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 16\nvars = 3\nseq_len = 12\nc_out = 2\nxb = torch.rand(bs, vars, seq_len)\n\n\ntest_eq(XceptionTimePlus(vars,c_out)(xb).shape, [bs, c_out])\ntest_eq(XceptionTimePlus(vars,c_out, nf=32)(xb).shape, [bs, c_out])\ntest_eq(XceptionTimePlus(vars,c_out, bottleneck=False)(xb).shape, [bs, c_out])\ntest_eq(XceptionTimePlus(vars,c_out, residual=False)(xb).shape, [bs, c_out])\ntest_eq(XceptionTimePlus(vars,c_out, coord=True)(xb).shape, [bs, c_out])\ntest_eq(XceptionTimePlus(vars,c_out, concat_pool=True)(xb).shape, [bs, c_out])\ntest_eq(count_parameters(XceptionTimePlus(3, 2)), 399540)\n\n\nm = XceptionTimePlus(2,3)\ntest_eq(check_weight(m, is_bn)[0].sum(), 5)\ntest_eq(len(check_bias(m, is_conv)[0]), 0)\nm = XceptionTimePlus(2,3, zero_norm=True)\ntest_eq(check_weight(m, is_bn)[0].sum(), 5)\nm = XceptionTimePlus(2,3, zero_norm=True, norm_act=True)\ntest_eq(check_weight(m, is_bn)[0].sum(), 7)\n\n\nm = XceptionTimePlus(2,3, coord=True)\ntest_eq(len(get_layers(m, cond=is_layer(AddCoords1d))), 25)\ntest_eq(len(get_layers(m, cond=is_layer(nn.Conv1d))), 37)\nm = XceptionTimePlus(2,3, bottleneck=False, coord=True)\ntest_eq(len(get_layers(m, cond=is_layer(AddCoords1d))), 21)\ntest_eq(len(get_layers(m, cond=is_layer(nn.Conv1d))), 33)\n\n\nm = XceptionTimePlus(vars, c_out, seq_len=seq_len, custom_head=mlp_head)\ntest_eq(m(xb).shape, [bs, c_out])\n\n\nXceptionTimePlus(vars, c_out, coord=True)\n\nXceptionTimePlus(\n (backbone): XceptionBlockPlus(\n (xception): ModuleList(\n (0): XceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(4, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(17, 17, kernel_size=(39,), stride=(1,), padding=(19,), groups=17, bias=False)\n (pointwise_conv): Conv1d(17, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(17, 17, kernel_size=(19,), stride=(1,), padding=(9,), groups=17, bias=False)\n (pointwise_conv): Conv1d(17, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(17, 17, kernel_size=(9,), stride=(1,), padding=(4,), groups=17, bias=False)\n (pointwise_conv): Conv1d(17, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(4, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n )\n (1): 
XceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(65, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(39,), stride=(1,), padding=(19,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(19,), stride=(1,), padding=(9,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(33, 33, kernel_size=(9,), stride=(1,), padding=(4,), groups=33, bias=False)\n (pointwise_conv): Conv1d(33, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(65, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n )\n (2): XceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(39,), stride=(1,), padding=(19,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(19,), stride=(1,), padding=(9,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(65, 65, kernel_size=(9,), stride=(1,), padding=(4,), groups=65, bias=False)\n (pointwise_conv): Conv1d(65, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n )\n (3): XceptionModulePlus(\n (bottleneck): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(257, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (convs): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(39,), stride=(1,), padding=(19,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(19,), stride=(1,), padding=(9,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(129, 129, kernel_size=(9,), stride=(1,), padding=(4,), groups=129, bias=False)\n (pointwise_conv): Conv1d(129, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n )\n (mp_conv): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(257, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (concat): Concat(dim=1)\n )\n 
)\n (shortcut): ModuleList(\n (0): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(4, 128, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 512, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (act): ModuleList(\n (0): ReLU()\n (1): ReLU()\n )\n (add): Add\n )\n (head): Sequential(\n (0): AdaptiveAvgPool1d(output_size=50)\n (1): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(513, 256, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (2): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(257, 128, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (3): ConvBlock(\n (0): AddCoords1d()\n (1): Conv1d(129, 2, kernel_size=(1,), stride=(1,), bias=False)\n (2): BatchNorm1d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): ReLU()\n )\n (4): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Reshape(bs)\n )\n )\n)", + "crumbs": [ + "Models", + "CNNs", + "XceptionTimePlus" + ] + }, + { + "objectID": "models.misc.html", + "href": "models.misc.html", + "title": "Miscellaneous", + "section": "", + "text": "This contains a set of experiments.\n\n\nsource\n\nInputWrapper\n\n InputWrapper (arch, c_in, c_out, seq_len, new_c_in=None,\n new_seq_len=None, **kwargs)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nfrom tsai.models.TST import *\n\n\nxb = torch.randn(16, 1, 1000)\nmodel = InputWrapper(TST, 1, 4, 1000, 10, 224)\ntest_eq(model.to(xb.device)(xb).shape, (16,4))\n\n\nsource\n\n\nResidualWrapper\n\n ResidualWrapper (model)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nRecursiveWrapper\n\n RecursiveWrapper (model, n_steps, anchored=False)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nxb = torch.randn(16, 1, 20)\nmodel = RecursiveWrapper(TST(1, 1, 20), 5)\ntest_eq(model.to(xb.device)(xb).shape, (16, 5))", + "crumbs": [ + "Models", + "Miscellaneous", + "Miscellaneous" + ] + }, + { + "objectID": "models.tssequencerplus.html", + "href": "models.tssequencerplus.html", + "title": "TSSequencerPlus", + "section": "", + "text": "This is a PyTorch implementation created by Ignacio Oguiza (oguiza@timeseriesAI.co) based on Sequencer: Deep LSTM for Image Classification\nclass TSSequencerPlus(nn.Sequential):\n r\"\"\"Time Series Sequencer model based on:\n\n Tatsunami, Y., & Taki, M. (2022). Sequencer: Deep LSTM for Image Classification. 
arXiv preprint arXiv:2205.01972.\n Official implementation: https://github.com/okojoalg/sequencer\n\n Args:\n c_in: the number of features (aka variables, dimensions, channels) in the time series dataset.\n c_out: the number of target classes.\n seq_len: number of time steps in the time series.\n d_model: total dimension of the model (number of features created by the model).\n depth: number of blocks in the encoder.\n act: the activation function of positionwise feedforward layer.\n lstm_dropout: dropout rate applied to the lstm sublayer.\n dropout: dropout applied to to the embedded sequence steps after position embeddings have been added and \n to the mlp sublayer in the encoder.\n drop_path_rate: stochastic depth rate.\n mlp_ratio: ratio of mlp hidden dim to embedding dim.\n lstm_bias: determines whether bias is applied to the LSTM layer.\n pre_norm: if True normalization will be applied as the first step in the sublayers. Defaults to False.\n use_token: if True, the output will come from the transformed token. This is meant to be use in classification tasks.\n use_pe: flag to indicate if positional embedding is used.\n n_cat_embeds: list with the sizes of the dictionaries of embeddings (int).\n cat_embed_dims: list with the sizes of each embedding vector (int).\n cat_padding_idxs: If specified, the entries at cat_padding_idxs do not contribute to the gradient; therefore, the embedding vector at cat_padding_idxs\n are not updated during training. Use 0 for those categorical embeddings that may have #na# values. Otherwise, leave them as None.\n You can enter a combination for different embeddings (for example, [0, None, None]).\n cat_pos: list with the position of the categorical variables in the input.\n token_size: Size of the embedding function used to reduce the sequence length (similar to ViT's patch size)\n tokenizer: nn.Module or callable that will be used to reduce the sequence length\n feature_extractor: nn.Module or callable that will be used to preprocess the time series before \n the embedding step. It is useful to extract features or resample the time series.\n flatten: flag to indicate if the 3d logits will be flattened to 2d in the model's head if use_token is set to False. \n If use_token is False and flatten is False, the model will apply a pooling layer.\n concat_pool: if True the head begins with fastai's AdaptiveConcatPool2d if concat_pool=True; otherwise, it uses traditional average pooling. \n fc_dropout: dropout applied to the final fully connected layer.\n use_bn: flag that indicates if batchnorm will be applied to the head.\n bias_init: values used to initialized the output layer.\n y_range: range of possible y values (used in regression tasks). \n custom_head: custom head that will be applied to the network. 
It must contain all kwargs (pass a partial function)\n verbose: flag to control verbosity of the model.\n\n Input:\n x: bs (batch size) x nvars (aka features, variables, dimensions, channels) x seq_len (aka time steps)\n \"\"\"\n \n def __init__(self, c_in:int, c_out:int, seq_len:int, d_model:int=128, depth:int=6, act:str='gelu',\n lstm_dropout:float=0., dropout:float=0., drop_path_rate:float=0., mlp_ratio:int=1, lstm_bias:bool=True, \n pre_norm:bool=False, use_token:bool=False, use_pe:bool=True, \n cat_pos:Optional[list]=None, n_cat_embeds:Optional[list]=None, cat_embed_dims:Optional[list]=None, cat_padding_idxs:Optional[list]=None,\n token_size:int=None, tokenizer:Optional[Callable]=None, feature_extractor:Optional[Callable]=None, \n flatten:bool=False, concat_pool:bool=True, fc_dropout:float=0., use_bn:bool=False, \n bias_init:Optional[Union[float, list]]=None, y_range:Optional[tuple]=None, custom_head:Optional[Callable]=None, verbose:bool=True,\n **kwargs):\n\n if use_token and c_out == 1: \n use_token = False\n pv(\"use_token set to False as c_out == 1\", verbose)\n backbone = _TSSequencerBackbone(c_in, seq_len, depth=depth, d_model=d_model, act=act,\n lstm_dropout=lstm_dropout, dropout=dropout, drop_path_rate=drop_path_rate, \n pre_norm=pre_norm, mlp_ratio=mlp_ratio, use_pe=use_pe, use_token=use_token, \n n_cat_embeds=n_cat_embeds, cat_embed_dims=cat_embed_dims, cat_padding_idxs=cat_padding_idxs, cat_pos=cat_pos, \n feature_extractor=feature_extractor, token_size=token_size, tokenizer=tokenizer)\n\n self.head_nf = d_model\n self.c_out = c_out\n self.seq_len = seq_len\n\n # Head\n if custom_head:\n if isinstance(custom_head, nn.Module): head = custom_head\n else: head = custom_head(self.head_nf, c_out, seq_len, **kwargs)\n else:\n nf = d_model\n layers = []\n if use_token: \n layers += [TokenLayer()]\n elif flatten:\n layers += [Reshape(-1)]\n nf = nf * seq_len\n else:\n if concat_pool: nf *= 2\n layers = [GACP1d(1) if concat_pool else GAP1d(1)]\n if use_bn: layers += [nn.BatchNorm1d(nf)]\n if fc_dropout: layers += [nn.Dropout(fc_dropout)]\n \n # Last layer\n linear = nn.Linear(nf, c_out)\n if bias_init is not None: \n if isinstance(bias_init, float): nn.init.constant_(linear.bias, bias_init)\n else: linear.bias = nn.Parameter(torch.as_tensor(bias_init, dtype=torch.float32))\n layers += [linear]\n\n if y_range: layers += [SigmoidRange(*y_range)]\n head = nn.Sequential(*layers)\n super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))\n \n \nTSSequencer = TSSequencerPlus\nsource", + "crumbs": [ + "Models", + "RNNs", + "TSSequencerPlus" + ] + }, + { + "objectID": "models.tssequencerplus.html#feature-extractor", + "href": "models.tssequencerplus.html#feature-extractor", + "title": "TSSequencerPlus", + "section": "Feature extractor", + "text": "Feature extractor\nIt’s a known fact that transformers cannot be directly applied to long sequences. To avoid this, we have included a way to subsample the sequence to generate a more manageable input.\n\nfrom tsai.data.validation import get_splits\nfrom tsai.data.core import get_ts_dls\n\n\nX = np.zeros((10, 3, 5000)) \ny = np.random.randint(0,2,X.shape[0])\nsplits = get_splits(y)\ndls = get_ts_dls(X, y, splits=splits)\nxb, yb = dls.train.one_batch()\nxb\n\n\n\n\n\n\n\n\nTSTensor(samples:8, vars:3, len:5000, device=cpu, dtype=torch.float32)\n\n\nIf you try to use SequencerPlus, it’s likely you’ll get an ‘out-of-memory’ error.\nTo avoid this you can subsample the sequence reducing the input’s length. 
This can be done in multiple ways. Here are a few examples:\n\n# Separable convolution (to avoid mixing channels)\nfeature_extractor = Conv1d(xb.shape[1], xb.shape[1], ks=100, stride=50, padding=0, groups=xb.shape[1]).to(default_device())\nfeature_extractor.to(xb.device)(xb).shape\n\ntorch.Size([8, 3, 99])\n\n\n\n# Convolution (if you want to mix channels or change number of channels)\nfeature_extractor=MultiConv1d(xb.shape[1], 64, kss=[1,3,5,7,9], keep_original=True).to(default_device())\ntest_eq(feature_extractor.to(xb.device)(xb).shape, (xb.shape[0], 64, xb.shape[-1]))\n\n\n# MaxPool\nfeature_extractor = nn.Sequential(Pad1d((0, 50), 0), nn.MaxPool1d(kernel_size=100, stride=50)).to(default_device())\nfeature_extractor.to(xb.device)(xb).shape\n\ntorch.Size([8, 3, 100])\n\n\n\n# AvgPool\nfeature_extractor = nn.Sequential(Pad1d((0, 50), 0), nn.AvgPool1d(kernel_size=100, stride=50)).to(default_device())\nfeature_extractor.to(xb.device)(xb).shape\n\ntorch.Size([8, 3, 100])\n\n\nOnce you decide what type of transform you want to apply, you just need to pass the layer as the feature_extractor attribute:\n\nbs = 16\nnvars = 4\nseq_len = 1000\nc_out = 2\nd_model = 128\n\nxb = torch.rand(bs, nvars, seq_len)\nfeature_extractor = partial(Conv1d, ks=5, stride=3, padding=0, groups=xb.shape[1])\nmodel = TSSequencerPlus(nvars, c_out, seq_len, d_model=d_model, feature_extractor=feature_extractor)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))", + "crumbs": [ + "Models", + "RNNs", + "TSSequencerPlus" + ] + }, + { + "objectID": "models.tssequencerplus.html#categorical-variables", + "href": "models.tssequencerplus.html#categorical-variables", + "title": "TSSequencerPlus", + "section": "Categorical variables", + "text": "Categorical variables\n\nfrom tsai.utils import alphabet, ALPHABET\n\n\na = alphabet[np.random.randint(0,3,40)]\nb = ALPHABET[np.random.randint(6,10,40)]\nc = np.random.rand(40).reshape(4,1,10)\nmap_a = {k:v for v,k in enumerate(np.unique(a))}\nmap_b = {k:v for v,k in enumerate(np.unique(b))}\nn_cat_embeds = [len(m.keys()) for m in [map_a, map_b]]\nszs = [emb_sz_rule(n) for n in n_cat_embeds]\na = np.asarray(a.map(map_a)).reshape(4,1,10)\nb = np.asarray(b.map(map_b)).reshape(4,1,10)\ninp = torch.from_numpy(np.concatenate((c,a,b), 1)).float()\nfeature_extractor = partial(Conv1d, ks=3, padding='same')\nmodel = TSSequencerPlus(3, 2, 10, d_model=64, cat_pos=[1,2], feature_extractor=feature_extractor)\ntest_eq(model(inp).shape, (4,2))\n\n[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.", + "crumbs": [ + "Models", + "RNNs", + "TSSequencerPlus" + ] + }, + { + "objectID": "models.tssequencerplus.html#sequence-embedding", + "href": "models.tssequencerplus.html#sequence-embedding", + "title": "TSSequencerPlus", + "section": "Sequence Embedding", + "text": "Sequence Embedding\nSometimes you have samples with a very long sequence length. In those cases, you may want to reduce the sequence length before passing the data to the transformer. 
To do that you may just pass a token_size like in this example:\n\nt = torch.rand(8, 2, 10080)\nSeqTokenizer(2, 128, 60)(t).shape\n\ntorch.Size([8, 128, 168])\n\n\n\nt = torch.rand(8, 2, 10080)\nmodel = TSSequencerPlus(2, 5, 10080, d_model=64, token_size=60)\nmodel(t).shape\n\ntorch.Size([8, 5])", + "crumbs": [ + "Models", + "RNNs", + "TSSequencerPlus" + ] + }, + { + "objectID": "inference.html", + "href": "inference.html", + "title": "Inference", + "section": "", + "text": "Code required for inference.\n\n\nsource\n\nLearner.get_X_preds\n\n Learner.get_X_preds (X, y=None, bs=64, with_input=False,\n with_decoded=True, with_loss=False, act=None)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nX\n\n\n\n\n\ny\nNoneType\nNone\n\n\n\nbs\nint\n64\n\n\n\nwith_input\nbool\nFalse\nreturns the input as well\n\n\nwith_decoded\nbool\nTrue\nreturns decoded predictions as well\n\n\nwith_loss\nbool\nFalse\nreturns the loss per item as well\n\n\nact\nNoneType\nNone\nApply activation to predictions, defaults to self.loss_func’s activation\n\n\n\nGet the predictions and targets, optionally with_input and with_loss.\nwith_decoded will also return the decoded predictions (it reverses the transforms applied).\nThe order of the output is the following:\n\ninput (optional): if with_input is True\nprobabiblities (for classification) or predictions (for regression)\ntarget: if y is provided. Otherwise None.\npredictions: predicted labels. Predictions will be decoded if with_decoded=True.\nloss (optional): if with_loss is set to True and y is not None.\n\n\nfrom tsai.data.external import get_UCR_data\n\n\ndsid = 'OliveOil'\nX, y, splits = get_UCR_data(dsid, split_data=False)\nX_test = X[splits[1]]\ny_test = y[splits[1]]\n\n\nlearn = load_learner(\"./models/test.pth\")\n\n⚠️ Warning: load_learner (from fastai) requires all your custom code be in the exact same place as when exporting your Learner (the main script, or the module you imported it from).\n\ntest_probas, test_targets, test_preds = learn.get_X_preds(X_test, with_decoded=True)\ntest_probas, test_targets, test_preds\n\n\n\n\n\n\n\n\n(tensor([[0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2639],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2641],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2421, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640]]),\n None,\n array(['4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',\n '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',\n '4', '4', '4', '4'], dtype='<U1'))\n\n\n\nimport torch\nfrom fastcore.test import test_close\n\n\ntorch_test_probas, torch_test_targets, torch_test_preds = learn.get_X_preds(torch.from_numpy(X_test), 
with_decoded=True)\ntorch_test_probas, torch_test_targets, torch_test_preds\ntest_close(test_probas, torch_test_probas)\n\n\n\n\n\n\n\n\n\ntest_probas2, test_targets2, test_preds2 = learn.get_X_preds(X_test, y_test, with_decoded=True)\ntest_probas2, test_targets2, test_preds2\n\n\n\n\n\n\n\n\n(tensor([[0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2639],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2641],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2421, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640]]),\n tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3]),\n array(['4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',\n '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',\n '4', '4', '4', '4'], dtype='<U1'))\n\n\n\ntest_probas3, test_targets3, test_preds3, test_losses3 = learn.get_X_preds(X_test, y_test, with_loss=True, with_decoded=True)\ntest_probas3, test_targets3, test_preds3, test_losses3\n\n\n\n\n\n\n\n\n(tensor([[0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2639],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2641],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2421, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2421, 0.2364, 0.2641],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640],\n [0.2574, 0.2422, 0.2364, 0.2640]]),\n tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3]),\n array(['4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',\n '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4', '4',\n '4', '4', '4', '4'], dtype='<U1'),\n TensorBase([1.3572, 1.3572, 1.3572, 1.3571, 1.3572, 1.4181, 1.4181, 1.4181,\n 1.4181, 1.4181, 1.4181, 1.4181, 1.4181, 1.4181, 1.4423, 1.4422,\n 1.4422, 1.4422, 1.3316, 1.3316, 1.3316, 1.3316, 1.3316, 1.3316,\n 1.3316, 1.3316, 1.3316, 1.3316, 1.3317, 1.3317]))\n\n\n\nfrom fastcore.test import 
test_eq\n\n\ntest_eq(test_probas, test_probas2)\ntest_eq(test_preds, test_preds2)\ntest_eq(test_probas, test_probas3)\ntest_eq(test_preds, test_preds3)", + "crumbs": [ + "Inference" + ] + }, + { + "objectID": "analysis.html", + "href": "analysis.html", + "title": "Analysis", + "section": "", + "text": "fastai Learner extensions useful to perform prediction analysis.\nsource", + "crumbs": [ + "Analysis" + ] + }, + { + "objectID": "analysis.html#permutation-importance", + "href": "analysis.html#permutation-importance", + "title": "Analysis", + "section": "Permutation importance", + "text": "Permutation importance\nWe’ve also introduced 2 methods to help you better understand how important certain features or certain steps are for your model. Both methods use permutation importance.\n⚠️ The permutation feature or step importance is defined as the decrease in a model score when a single feature or step value is randomly shuffled.\nSo if you are using accuracy (higher is better), the most important features or steps will be those with a lower value on the chart (as randomly shuffling them reduces performance).\nThe opposite occurs for metrics like mean squared error (lower is better). In this case, the most important features or steps will be those with a higher value on the chart.\nThere are 2 issues with step importance:\n\nthere may be many steps, and the analysis could take a very long time\nsteps will likely have a high autocorrelation\n\nFor those reasons, we’ve introduced an argument (n_steps) to group steps. In this way, you’ll be able to identify which part of the time series is the most important.\nFeature importance has been adapted from https://www.kaggle.com/cdeotte/lstm-feature-importance by Chris Deotte (Kaggle GrandMaster).\n\nsource\n\nLearner.feature_importance\n\n Learner.feature_importance (X=None, y=None, bs:int=None,\n partial_n:(<class'int'>,<class'float'>)=None,\n method:str='permutation',\n feature_names:list=None, sel_classes:(<class'\n str'>,<class'list'>)=None,\n key_metric_idx:int=0, show_chart:bool=True,\n figsize:tuple=None, title:str=None,\n return_df:bool=True,\n save_df_path:pathlib.Path=None,\n random_state:int=23, verbose:bool=True)\n\nCalculates feature importance as the drop in the model’s validation loss or metric when a feature value is randomly shuffled\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nX\nNoneType\nNone\narray-like object containing the time series. If None, all data in the validation set will be used.\n\n\ny\nNoneType\nNone\narray-like object containing the targets. If None, all targets in the validation set will be used.\n\n\nbs\nint\nNone\nbatch size. If None, the default batch size of the dataloader will be used.\n\n\npartial_n\n(<class ‘int’>, <class ‘float’>)\nNone\n# (int) or % (float) of the data used to measure feature importance. If None, all data will be used.\n\n\nmethod\nstr\npermutation\nMethod used to invalidate feature. Use ‘permutation’ for shuffling or ‘ablation’ for setting values to np.nan.\n\n\nfeature_names\nlist\nNone\nOptional list of feature names that will be displayed if available. Otherwise var_0, var_1, etc.\n\n\nsel_classes\n(<class ‘str’>, <class ‘list’>)\nNone\nclasses for which the analysis will be made\n\n\nkey_metric_idx\nint\n0\nOptional position of the metric used. 
If None or no metric is available, the loss will be used.\n\n\nshow_chart\nbool\nTrue\nFlag to indicate if a chart showing permutation feature importance will be plotted.\n\n\nfigsize\ntuple\nNone\nSize of the chart.\n\n\ntitle\nstr\nNone\nOptional string that will be used as the chart title. If None ‘Permutation Feature Importance’.\n\n\nreturn_df\nbool\nTrue\nFlag to indicate if the dataframe with feature importance will be returned.\n\n\nsave_df_path\nPath\nNone\nPath where dataframe containing the permutation feature importance results will be saved.\n\n\nrandom_state\nint\n23\nOptional int that controls the shuffling applied to the data.\n\n\nverbose\nbool\nTrue\nFlag that controls verbosity.\n\n\n\n\nsource\n\n\nLearner.step_importance\n\n Learner.step_importance (X=None, y=None, bs:int=None,\n partial_n:(<class'int'>,<class'float'>)=None,\n method:str='permutation', step_names:list=None,\n sel_classes:(<class'str'>,<class'list'>)=None,\n n_steps:int=1, key_metric_idx:int=0,\n show_chart:bool=True, figsize:tuple=(10, 5),\n title:str=None, xlabel=None,\n return_df:bool=True,\n save_df_path:pathlib.Path=None,\n random_state:int=23, verbose:bool=True)\n\nCalculates step importance as the drop in the model’s validation loss or metric when a step/s value/s is/are randomly shuffled\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nX\nNoneType\nNone\narray-like object containing the time series. If None, all data in the validation set will be used.\n\n\ny\nNoneType\nNone\narray-like object containing the targets. If None, all targets in the validation set will be used.\n\n\nbs\nint\nNone\nbatch size used to compute predictions. If None, the batch size used in the validation set will be used.\n\n\npartial_n\n(<class ‘int’>, <class ‘float’>)\nNone\n# (int) or % (float) of used to measure feature importance. If None, all data will be used.\n\n\nmethod\nstr\npermutation\nMethod used to invalidate feature. Use ‘permutation’ for shuffling or ‘ablation’ for setting values to np.nan.\n\n\nstep_names\nlist\nNone\nOptional list of step names that will be displayed if available. Otherwise 0, 1, 2, etc.\n\n\nsel_classes\n(<class ‘str’>, <class ‘list’>)\nNone\nclasses for which the analysis will be made\n\n\nn_steps\nint\n1\n# of steps that will be analyzed at a time. Default is 1.\n\n\nkey_metric_idx\nint\n0\nOptional position of the metric used. If None or no metric is available, the loss will be used.\n\n\nshow_chart\nbool\nTrue\nFlag to indicate if a chart showing permutation feature importance will be plotted.\n\n\nfigsize\ntuple\n(10, 5)\nSize of the chart.\n\n\ntitle\nstr\nNone\nOptional string that will be used as the chart title. If None ‘Permutation Feature Importance’.\n\n\nxlabel\nNoneType\nNone\nOptional string that will be used as the chart xlabel. 
If None ‘steps’.\n\n\nreturn_df\nbool\nTrue\nFlag to indicate if the dataframe with feature importance will be returned.\n\n\nsave_df_path\nPath\nNone\nPath where dataframe containing the permutation feature importance results will be saved.\n\n\nrandom_state\nint\n23\nOptional int that controls the shuffling applied to the data.\n\n\nverbose\nbool\nTrue\nFlag that controls verbosity.\n\n\n\n\nfrom tsai.data.external import get_UCR_data\nfrom tsai.data.preprocessing import TSRobustScale, TSStandardize\nfrom tsai.learner import ts_learner\nfrom tsai.models.FCNPlus import FCNPlus\nfrom tsai.metrics import accuracy\n\n\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, split_data=False)\ntfms = [None, [TSClassification()]]\nbatch_tfms = TSRobustScale()\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, splits=splits, sel_vars=[0, 3, 5, 8, 10], sel_steps=slice(-30, None), tfms=tfms, batch_tfms=batch_tfms)\nlearn = ts_learner(dls, FCNPlus, metrics=accuracy, train_metrics=True)\nlearn.fit_one_cycle(2)\nlearn.plot_metrics()\nlearn.show_probas()\nlearn.plot_confusion_matrix()\nlearn.plot_top_losses(X[splits[1]], y[splits[1]], largest=True)\nlearn.top_losses(X[splits[1]], y[splits[1]], largest=True)\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\ntrain_accuracy\nvalid_loss\nvalid_accuracy\ntime\n\n\n\n\n0\n1.792511\n0.187500\n1.619460\n0.216667\n00:02\n\n\n1\n1.592681\n0.632812\n1.475991\n0.250000\n00:01\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n(TensorBase([2.3713, 2.3146, 2.2843, 2.2581, 2.2408, 2.2264, 2.2254, 2.2237,\n 2.2230]),\n [9, 56, 128, 25, 104, 116, 57, 72, 108])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nlearn.feature_importance()\n\nX.shape: (180, 24, 51)\ny.shape: (180,)\nSelected metric: accuracy\nComputing feature importance (permutation method)...\n 0 feature: BASELINE accuracy: 0.277778\n 0 feature: var_0 accuracy: 0.238889\n 3 feature: var_3 accuracy: 0.172222\n 5 feature: var_5 accuracy: 0.261111\n 8 feature: var_8 accuracy: 0.250000\n 10 feature: var_10 accuracy: 0.266667\n\n\n\n\n\n\n\n\n \n \n 100.00% [6/6 00:04<00:00]\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nFeature\naccuracy\naccuracy_change\n\n\n\n\n0\nvar_3\n0.172222\n0.105556\n\n\n1\nvar_0\n0.238889\n0.038889\n\n\n2\nvar_8\n0.250000\n0.027778\n\n\n3\nvar_5\n0.261111\n0.016667\n\n\n4\nvar_10\n0.266667\n0.011111\n\n\n5\nBASELINE\n0.277778\n-0.000000\n\n\n\n\n\n\n\n\nlearn.step_importance(n_steps=5);\n\nX.shape: (180, 24, 51)\ny.shape: (180,)\nSelected metric: accuracy\nComputing step importance...\n 0 step: BASELINE accuracy: 0.277778\n 1 step: 21 to 25 accuracy: 0.288889\n 2 step: 26 to 30 accuracy: 0.255556\n 3 step: 31 to 35 accuracy: 0.194444\n 4 step: 36 to 40 accuracy: 0.216667\n 5 step: 41 to 45 accuracy: 0.272222\n 6 step: 46 to 50 accuracy: 0.283333\n\n\n\n\n\n\n\n\n \n \n 100.00% [7/7 00:04<00:00]\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nYou may pass an X and y if you want to analyze a particular group of samples:\nlearn.feature_importance(X=X[splits[1]], y=y[splits[1]])\nIf you have a large validation dataset, you may also use the partial_n argument to select a fixed amount of samples (integer) or a percentage of the validation dataset (float):\nlearn.feature_importance(partial_n=.1)\nlearn.feature_importance(partial_n=100)", + "crumbs": [ + "Analysis" + ] + }, + { + "objectID": "calibration.html", + 
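Note: to make the permutation importance idea described in the Analysis section above concrete, here is a minimal, library-independent sketch. The names permutation_importance and score_fn are illustrative only; Learner.feature_importance remains the supported API in tsai.

import numpy as np

def permutation_importance(score_fn, X, y, random_state=23):
    # score_fn(X, y) -> float, e.g. accuracy computed from a trained model's predictions.
    # X is expected to have shape (samples, vars, steps), as elsewhere in tsai.
    rng = np.random.default_rng(random_state)
    baseline = score_fn(X, y)
    drops = {}
    for var in range(X.shape[1]):
        X_perm = X.copy()
        # Shuffle a single variable across samples, breaking its link with y.
        X_perm[:, var] = X_perm[rng.permutation(len(X_perm)), var]
        drops[f"var_{var}"] = baseline - score_fn(X_perm, y)
    return baseline, drops

A large drop for a variable means the model relied on it; a drop close to zero means shuffling it barely changes the score.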
"href": "calibration.html", + "title": "Calibration", + "section": "", + "text": "Functionality to calibrate a trained, binary classification model using temperature scaling.\n\n\nsource\n\nECELoss\n\n ECELoss (n_bins=10)\n\nCalculates the Expected Calibration Error of a model.\n\nsource\n\n\nTemperatureSetter\n\n TemperatureSetter (model, lr=0.01, max_iter=1000, line_search_fn=None,\n n_bins=10, verbose=True)\n\nCalibrates a binary classification model optimizing temperature\n\nsource\n\n\nModelWithTemperature\n\n ModelWithTemperature (model)\n\nA decorator which wraps a model with temperature scaling\n\nsource\n\n\nplot_calibration_curve\n\n plot_calibration_curve (labels, logits, cal_logits=None, figsize=(6, 6),\n n_bins=10, strategy='uniform')\n\n\nsource\n\n\nLearner.calibrate_model\n\n Learner.calibrate_model (X=None, y=None, lr=0.01, max_iter=10000,\n line_search_fn=None, n_bins=10,\n strategy='uniform', show_plot=True, figsize=(6,\n 6), verbose=True)\n\n\nfrom tsai.basics import *\nfrom tsai.models.FCNPlus import FCNPlus\n\n\nX, y, splits = get_UCR_data('FingerMovements', split_data=False)\ntfms = [None, [TSClassification()]]\nbatch_tfms = TSRobustScale()\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\nlearn = ts_learner(dls, FCNPlus, metrics=accuracy)\nlearn.fit_one_cycle(2)\n\n\n\n\nepoch\ntrain_loss\nvalid_loss\naccuracy\ntime\n\n\n\n\n0\n0.696826\n0.706016\n0.430000\n00:04\n\n\n1\n0.690209\n0.699720\n0.490000\n00:03\n\n\n\n\n\n\nlearn.calibrate_model()\ncalibrated_model = learn.calibrated_model\n\nBefore temperature - NLL: 0.700, ECE: 0.066\nCalibrating the model...\n...model calibrated\nOptimal temperature: 6.383\nAfter temperature - NLL: 0.693, ECE: 0.019", + "crumbs": [ + "Training", + "Calibration" + ] + }, + { + "objectID": "data.core.html", + "href": "data.core.html", + "title": "Data Core", + "section": "", + "text": "Main Numpy and Times Series functions used throughout the library.\n\n\nfrom tsai.data.external import get_UCR_data\n\n\ndsid = 'OliveOil'\nX_train, y_train, X_valid, y_valid = get_UCR_data(dsid, on_disk=True, force_download=True)\nX_on_disk, y_on_disk, splits = get_UCR_data(dsid, on_disk=True, return_split=False, force_download=True)\nX_in_memory, y_in_memory, splits = get_UCR_data(dsid, on_disk=False, return_split=False, force_download=True)\ny_tensor = cat2int(y_on_disk)\ny_array = y_tensor.numpy()\n\n\nsource\n\nToNumpyTensor\n\n ToNumpyTensor (enc=None, dec=None, split_idx=None, order=None)\n\nTransforms an object into NumpyTensor\n\nsource\n\n\nNumpyTensor\n\n NumpyTensor (o, dtype=None, device=None, copy=None, requires_grad=False,\n **kwargs)\n\nReturns a tensor with subclass NumpyTensor that has a show method\n\nsource\n\n\nTSTensor\n\n TSTensor (o, dtype=None, device=None, copy=None, requires_grad=False,\n **kwargs)\n\nReturns a tensor with subclass TSTensor that has a show method\n\nsource\n\n\nshow_tuple\n\n show_tuple (tup, nrows:int=1, ncols:int=1,\n sharex:Union[bool,Literal['none','all','row','col']]=False,\n sharey:Union[bool,Literal['none','all','row','col']]=False,\n squeeze:bool=True,\n width_ratios:Optional[Sequence[float]]=None,\n height_ratios:Optional[Sequence[float]]=None,\n subplot_kw:Optional[dict[str,Any]]=None,\n gridspec_kw:Optional[dict[str,Any]]=None)\n\nDisplay a timeseries plot from a decoded tuple\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ntup\n\n\n\n\n\nnrows\nint\n1\n\n\n\nncols\nint\n1\n\n\n\nsharex\nbool | Literal[‘none’, ‘all’, ‘row’, ‘col’]\nFalse\n\n\n\nsharey\nbool | 
Literal[‘none’, ‘all’, ‘row’, ‘col’]\nFalse\n\n\n\nsqueeze\nbool\nTrue\n- If True, extra dimensions are squeezed out from the returned array of ~matplotlib.axes.Axes: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axes object is returned as a scalar. - for Nx1 or 1xM subplots, the returned object is a 1D numpy object array of Axes objects. - for NxM, subplots with N>1 and M>1 are returned as a 2D array.- If False, no squeezing at all is done: the returned Axes object is always a 2D array containing Axes instances, even if it ends up being 1x1.\n\n\nwidth_ratios\nSequence[float] | None\nNone\nDefines the relative widths of the columns. Each column gets arelative width of width_ratios[i] / sum(width_ratios).If not given, all columns will have the same width. Equivalentto gridspec_kw={'width_ratios': [...]}.\n\n\nheight_ratios\nSequence[float] | None\nNone\nDefines the relative heights of the rows. Each row gets arelative height of height_ratios[i] / sum(height_ratios).If not given, all rows will have the same height. Conveniencefor gridspec_kw={'height_ratios': [...]}.\n\n\nsubplot_kw\ndict[str, Any] | None\nNone\nDict with keywords passed to the~matplotlib.figure.Figure.add_subplot call used to create eachsubplot.\n\n\ngridspec_kw\ndict[str, Any] | None\nNone\nDict with keywords passed to the ~matplotlib.gridspec.GridSpecconstructor used to create the grid the subplots are placed on.\n\n\n\n\nsource\n\n\nToTSTensor\n\n ToTSTensor (enc=None, dec=None, split_idx=None, order=None)\n\nTransforms an object into TSTensor\n\na = np.random.randn(2, 3, 4).astype(np.float16)\nassert np.shares_memory(a, NumpyTensor(a))\nassert np.shares_memory(a, TSTensor(a))\n\n\na = np.random.randn(2, 3, 4).astype(np.float32)\nassert np.shares_memory(a, NumpyTensor(a))\nassert np.shares_memory(a, TSTensor(a))\n\n\na = np.random.randint(10, size=10).astype(np.int64)\nassert np.shares_memory(a, NumpyTensor(a))\nassert np.shares_memory(a, TSTensor(a))\n\n\na = np.random.randint(10, size=10).astype(np.int32)\nassert np.shares_memory(a, NumpyTensor(a))\nassert np.shares_memory(a, TSTensor(a))\n\n\na = torch.rand(2, 3, 4).float()\nassert np.shares_memory(a, NumpyTensor(a))\nassert np.shares_memory(a, TSTensor(a))\n\n\na = torch.randint(3, (10,))\nassert np.shares_memory(a, NumpyTensor(a))\nassert np.shares_memory(a, TSTensor(a))\n\n\nt = TSTensor(torch.randn(2, 3, 4))\np = torch.tensor(3., requires_grad=True)\ntest = torch.add(t, p)\ntest_eq(test.requires_grad, True)\ntest_eq(type(t.data), torch.Tensor)\ntest_eq(type(t), TSTensor)\n\n\nl = L([0,1,2,3], [4,5,6,7], [8, 9, 10, 11])\nTSTensor(l), TSTensor(l).data\n\n(TSTensor(vars:3, len:4, device=cpu, dtype=torch.int64),\n tensor([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]))\n\n\n\nt = TSTensor(X_train)\nfor i in range(4):\n print(t, t.ndim, torch.is_tensor(t))\n if i < 3: t = t[0]\n\nTSTensor(samples:30, vars:1, len:570, device=cpu, dtype=torch.float32) 3 True\nTSTensor(vars:1, len:570, device=cpu, dtype=torch.float32) 2 True\nTSTensor(len:570, device=cpu, dtype=torch.float32) 1 True\nTSTensor([-0.6113752722740173], device=cpu, dtype=torch.float32) 0 True\n\n\n\nTSTensor(X_on_disk)\n\nTSTensor(samples:60, vars:1, len:570, device=cpu, dtype=torch.float32)\n\n\n\nToTSTensor()(X_on_disk)\n\nTSTensor(samples:60, vars:1, len:570, device=cpu, dtype=torch.float32)\n\n\n\nTSTensor(X_train).show();\n\n\n\n\n\n\n\n\n\nTSTensor(X_train).show(title='1');\n\n\n\n\n\n\n\n\n\nshow_tuple((TSTensor(X_train), ['1', 
'2']))\n\n\n\n\n\n\n\n\n\nshow_tuple((TSTensor(np.arange(10).reshape(2,5)), 1))\n\n\n\n\n\n\n\n\n\nshow_tuple((TSTensor(np.arange(10).reshape(2,5)), '1'))\n\n\n\n\n\n\n\n\n\nshow_tuple((TSTensor(np.arange(10).reshape(2,5)), [1,2]))\n\n\n\n\n\n\n\n\n\nshow_tuple((TSTensor(np.arange(10).reshape(2,5)), ['1', '2']))\n\n\n\n\n\n\n\n\n\nsource\n\n\nTSMaskTensor\n\n TSMaskTensor (o, dtype=None, device=None, copy=None, requires_grad=False,\n **kwargs)\n\nReturns a tensor with subclass NumpyTensor that has a show method\n\nsource\n\n\nTSLabelTensor\n\n TSLabelTensor (o, dtype=None, device=None, copy=None,\n requires_grad=False, **kwargs)\n\nReturns a tensor with subclass NumpyTensor that has a show method\n\nt = TSLabelTensor(torch.randint(0,10,(1, 2, 3)))\nt, t[0], t[0][0], t[0][0][0]\n\n(TSLabelTensor(shape:(1, 2, 3), device=cpu, dtype=torch.int64),\n TSLabelTensor(shape:(2, 3), device=cpu, dtype=torch.int64),\n TSLabelTensor(shape:(3,), device=cpu, dtype=torch.int64),\n 7)\n\n\n\nt = TSMaskTensor(torch.randint(0,10,(1, 2, 3)))\nt, t[0], t[0][0], t[0][0][0]\n\n(TSMaskTensor(shape:(1, 2, 3), device=cpu, dtype=torch.int64),\n TSMaskTensor(shape:(2, 3), device=cpu, dtype=torch.int64),\n TSMaskTensor(shape:(3,), device=cpu, dtype=torch.int64),\n 1)\n\n\n\nsource\n\n\nTSClassification\n\n TSClassification (vocab=None, sort=True)\n\nVectorized, reversible transform of category string to vocab id\n\nsource\n\n\nToInt\n\n ToInt (enc=None, dec=None, split_idx=None, order=None)\n\nTransforms an object dtype to int\n\nsource\n\n\nToFloat\n\n ToFloat (enc=None, dec=None, split_idx=None, order=None)\n\nTransforms an object dtype to float (vectorized)\n\na = np.random.randint(0, 2, 10)\nb = np.array(['1', '2', '3'])\nc = np.array(['1.0', '2.0', '3.0'])\nt = torch.randint(0, 2, (10, ))\ntest_eq(ToFloat()(a).dtype, 'float32')\ntest_eq(ToFloat()(b).dtype, 'float32')\ntest_eq(ToFloat()(c).dtype, 'float32')\ntest_eq(ToFloat()(t).dtype, torch.float32)\n\n\na = np.random.rand(10)*10\nb = np.array(['1.0', '2.0', '3.0'])\nt = torch.rand(10)*10\ntest_eq(ToInt()(a).dtype, 'int64')\ntest_eq(ToInt()(b).dtype, 'int64')\ntest_eq(ToInt()(t).dtype, torch.long)\n\n\nt = TSClassification()\nt.setup(y_on_disk[splits[0]])\ny_encoded = t(y_on_disk)\nprint(y_encoded)\ntest_eq(t.decodes(y_encoded), y_on_disk)\n\nTensorCategory([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3])\n\n\n\ny_multi= np.random.randint(0,3,20)\ny_multi = np.asarray(alphabet[y_multi]).reshape(4,5)\ntfm = TSClassification()\ntfm.setup(y_multi)\nenc_y_multi = tfm(y_multi)\ntest_eq(y_multi, tfm.decode(enc_y_multi))\nenc_y_multi\n\nTensorCategory([[0, 1, 1, 1, 2],\n [0, 1, 2, 1, 0],\n [2, 1, 0, 1, 2],\n [0, 2, 0, 2, 2]])\n\n\n\nsource\n\n\nTSMultiLabelClassification\n\n TSMultiLabelClassification (c=None, vocab=None, add_na=False, sort=True)\n\nReversible combined transform of multi-category strings to one-hot encoded vocab id\n\nsource\n\n\nTSTensorBlock\n\n TSTensorBlock (type_tfms=None, item_tfms=None, batch_tfms=None,\n dl_type=None, dls_kwargs=None)\n\nInitialize self. See help(type(self)) for accurate signature.\n\nsource\n\n\nNumpyTensorBlock\n\n NumpyTensorBlock (type_tfms=None, item_tfms=None, batch_tfms=None,\n dl_type=None, dls_kwargs=None)\n\nInitialize self. 
See help(type(self)) for accurate signature.\n\ntest_eq(NumpyTensorBlock().item_tfms[0].__name__, 'ToNumpyTensor')\ntest_eq(TSTensorBlock().item_tfms[0].__name__, 'ToTSTensor')\n\n\nsource\n\n\nTSDataset\n\n TSDataset (X, y=None, split=None, sel_vars=None, sel_steps=None,\n types=None, dtype=None, device=None)\n\nInitialize self. See help(type(self)) for accurate signature.\n\nsource\n\n\nNumpyDataset\n\n NumpyDataset (X, y=None, types=None)\n\nInitialize self. See help(type(self)) for accurate signature.\n\nsource\n\n\nTorchDataset\n\n TorchDataset (X, y=None)\n\nInitialize self. See help(type(self)) for accurate signature.\n\na = np.random.rand(5,6,7)\nb = np.random.rand(5)\nds = NumpyDataset(a,b)\nxb, yb = ds[[0,4]]\ntest_eq(xb.shape, (2,6,7))\ntest_eq(yb.shape, (2,))\n\n\nsource\n\n\nTSTfmdLists\n\n TSTfmdLists (items=None, *rest, use_list=False, match=None)\n\nA Pipeline of tfms applied to a collection of items\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nitems\nlist\n\nItems to apply Transforms to\n\n\nuse_list\nbool\nNone\nUse list in L\n\n\n\n\nsource\n\n\nNoTfmLists\n\n NoTfmLists (items=None, *rest, use_list=False, match=None)\n\nA Pipeline of tfms applied to a collection of items\n\nitems = X_on_disk\ntl = TfmdLists(items, tfms=None, splits=splits)\ntest_eq(len(tl), len(X_on_disk))\ntest_eq(len(tl.train), len(splits[0]))\ntest_eq(len(tl.valid), len(splits[1]))\ntest_eq(tl[[0,4,7]], X_on_disk[[0,4,7]])\ntest_eq(tl.train[[0,4,7]], X_on_disk[splits[0][0,4,7]])\ntest_eq(tl.valid[[0,4,7]], X_on_disk[splits[1][0,4,7]])\ntest_eq(tl[0], items[0])\ntest_eq(tl[[0,1]], items[[0,1]])\ntest_eq(tl.decode(tl[0]), tl[0])\ntest_eq((tl.split_idx, tl.train.split_idx, tl.valid.split_idx), (None, 0, 1))\n\n\nitems = X_on_disk\ntl = TSTfmdLists(items, tfms=None, splits=splits)\ntest_eq(len(tl), len(X_on_disk))\ntest_eq(len(tl.train), len(splits[0]))\ntest_eq(len(tl.valid), len(splits[1]))\ntest_eq(tl[[0,4,7]], X_on_disk[[0,4,7]])\ntest_eq(tl.train[[0,4,7]], X_on_disk[splits[0][0,4,7]])\ntest_eq(tl.valid[[0,4,7]], X_on_disk[splits[1][0,4,7]])\ntest_eq(tl[0], items[0])\ntest_eq(tl[[0,1]], items[[0,1]])\ntest_eq(tl.decode(tl[0]), tl[0])\ntest_eq((tl.split_idx, tl.train.split_idx, tl.valid.split_idx), (None, 0, 1))\n\n\nitems = X_on_disk\nntl = NoTfmLists(items, splits=splits)\ntest_eq(len(ntl), len(X_on_disk))\ntest_eq(len(ntl.train), len(splits[0]))\ntest_eq(len(ntl.valid), len(splits[1]))\ntest_eq(ntl._splits, np.arange(len(X_on_disk)))\ntest_eq(ntl.train._splits, np.arange(len(splits[0])))\ntest_eq(ntl.valid._splits, np.arange(len(splits[0]), len(X_on_disk)))\nprint(ntl)\nprint(ntl.train)\nprint(ntl.valid)\ntest_eq(ntl[[0,4,7]], X_on_disk[[0,4,7]])\ntest_eq(ntl.train[[0,4,7]], X_on_disk[splits[0][0,4,7]])\ntest_eq(ntl.valid[[0,4,7]], X_on_disk[splits[1][0,4,7]])\ntest_eq(ntl[0], items[0])\ntest_eq(ntl[[0,1]], items[[0,1]])\ntest_eq(ntl[:], X_on_disk)\nntl[0].shape, stack(ntl[[0,1]]).shape\ntest_eq(ntl.decode(ntl[0]), ntl[0])\nassert id(items) == id(ntl.items) == id(ntl.train.items) == id(ntl.valid.items)\ntest_eq((ntl.split_idx, ntl.train.split_idx, ntl.valid.split_idx), (None, 0, 1))\n\nNoTfmLists: memmap(60, 1, 570)\nNoTfmLists: memmap(30, 1, 570)\nNoTfmLists: memmap(30, 1, 570)\n\n\n\nsubitems = X_on_disk\nnew_ntl = ntl._new(X_on_disk)\ntest_eq(new_ntl[:], X_on_disk)\n\n\nidxs = random_choice(len(X_on_disk), 10, False)\nnew_ntl = ntl._new(X_on_disk[idxs])\ntest_eq(new_ntl[:], X_on_disk[idxs])\n\n\nidxs = random_choice(len(X_on_disk), 10, False)\nnew_ntl = 
ntl.valid._new(X_on_disk[idxs])\ntest_eq(new_ntl[:], X_on_disk[idxs])\n\n\nsource\n\n\ntscoll_repr\n\n tscoll_repr (c, max_n=10)\n\nString repr of up to max_n items of (possibly lazy) collection c\n\nsource\n\n\nNumpyDatasets\n\n NumpyDatasets (items:list=None, tfms:MutableSequence|Pipeline=None,\n tls:TfmdLists=None, n_inp:int=None, dl_type=None,\n use_list:bool=None, do_setup:bool=True,\n split_idx:int=None, train_setup:bool=True,\n splits:list=None, types=None, verbose:bool=False)\n\nA dataset that creates tuples from X (and y) and applies tfms of type item_tfms\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nitems\nlist\n\nItems to apply Transforms to\n\n\ntfms\nMutableSequence | Pipeline\n\nTransform(s) or Pipeline to apply\n\n\ntls\nNoneType\nNone\n\n\n\nn_inp\nNoneType\nNone\n\n\n\ndl_type\nTfmdDL\nNone\nType of DataLoader\n\n\nuse_list\nbool\nNone\nUse list in L\n\n\ndo_setup\nbool\nTrue\nCall setup() for Transform\n\n\nsplit_idx\nint\nNone\nApply Transform(s) to training or validation set. 0 for training set and 1 for validation set\n\n\ntrain_setup\nbool\nTrue\nApply Transform(s) only on training DataLoader\n\n\nsplits\nlist\nNone\nIndices for training and validation sets\n\n\ntypes\nNoneType\nNone\nTypes of data in items\n\n\nverbose\nbool\nFalse\nPrint verbose output\n\n\n\n\nsource\n\n\nTSDatasets\n\n TSDatasets (items:list=None, tfms:MutableSequence|Pipeline=None,\n tls:TfmdLists=None, n_inp:int=None, dl_type=None,\n use_list:bool=None, do_setup:bool=True, split_idx:int=None,\n train_setup:bool=True, splits:list=None, types=None,\n verbose:bool=False)\n\nA dataset that creates tuples from X (and optionally y) and applies item_tfms\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nitems\nlist\n\nItems to apply Transforms to\n\n\ntfms\nMutableSequence | Pipeline\n\nTransform(s) or Pipeline to apply\n\n\ntls\nNoneType\nNone\n\n\n\nn_inp\nNoneType\nNone\n\n\n\ndl_type\nTfmdDL\nNone\nType of DataLoader\n\n\nuse_list\nbool\nNone\nUse list in L\n\n\ndo_setup\nbool\nTrue\nCall setup() for Transform\n\n\nsplit_idx\nint\nNone\nApply Transform(s) to training or validation set. 
0 for training set and 1 for validation set\n\n\ntrain_setup\nbool\nTrue\nApply Transform(s) only on training DataLoader\n\n\nsplits\nlist\nNone\nIndices for training and validation sets\n\n\ntypes\nNoneType\nNone\nTypes of data in items\n\n\nverbose\nbool\nFalse\nPrint verbose output\n\n\n\n\ndsets = TSDatasets(X_on_disk, y_on_disk, splits=splits, tfms=[None, TSClassification()], inplace=True)\ni = random_choice(len(splits[0]), 10, False).tolist()\ntest_eq(dsets.subset(i), dsets.train.subset(i))\ndsets.valid.subset(i)\ndsets.valid.subset(i)[[0,6,8]]\ntest_eq(dsets.subset(i)[[0,6,8]], dsets.train.subset(i)[[0,6,8]])\ndsets.subset([0,7,3])\ndsets.subset(i), dsets.train.subset(i), dsets.valid.subset(i)\n\n((#10) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(2)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3))] ...],\n (#10) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(2)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3))] ...],\n (#10) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(2)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(3))] ...])\n\n\n\ntfms = [None, TSClassification()]\ndsets = TSDatasets(X_on_disk, y_on_disk, splits=splits, tfms=tfms, inplace=False)\nassert id(X_on_disk) == id(dsets.ptls[0].items) == id(dsets.train.ptls[0].items) == id(dsets.valid.ptls[0].items)\n\ntfms = None\ndsets = TSDatasets(X_on_disk, splits=splits, tfms=tfms, inplace=False)\nassert id(X_on_disk) == 
id(dsets.ptls[0].items) == id(dsets.train.ptls[0].items) == id(dsets.valid.ptls[0].items)\n\n\nsource\n\n\nTSDatasets.add_unlabeled\n\n TSDatasets.add_unlabeled (X, inplace=True)\n\n\nsource\n\n\nTSDatasets.add_test\n\n TSDatasets.add_test (X, y=None, inplace=True)\n\n\nsource\n\n\nTSDatasets.add_dataset\n\n TSDatasets.add_dataset (X, y=None, inplace=True)\n\n\nsource\n\n\nNumpyDatasets.add_unlabeled\n\n NumpyDatasets.add_unlabeled (X, inplace=True)\n\n\nsource\n\n\nNumpyDatasets.add_test\n\n NumpyDatasets.add_test (X, y=None, inplace=True)\n\n\nsource\n\n\nNumpyDatasets.add_dataset\n\n NumpyDatasets.add_dataset (X, y=None, inplace=True)\n\n\nsource\n\n\nadd_ds\n\n add_ds (dsets, X, y=None, inplace=True)\n\nCreate test datasets from X (and y) using validation transforms of dsets\n\ndsets = TSDatasets(X_on_disk, y_on_disk, splits=splits, tfms=[None, TSClassification()], inplace=True)\nprint(dsets.train[0][0].shape, dsets.train[[0,1]][0].shape)\nprint(dsets.split_idx, dsets.train.split_idx, dsets.valid.split_idx)\nprint(dsets.new_empty())\ndsets\n\ntorch.Size([1, 570]) torch.Size([2, 1, 570])\nNone 0 1\n(#0) []\n\n\n(#60) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory(1))] ...]\n\n\n\ndsets = TSDatasets(X_on_disk, y_on_disk, splits=splits, tfms=[None, TSClassification()], inplace=False)\nprint(dsets.train[0][0].shape, dsets.train[[0,1]][0].shape)\nprint(dsets.split_idx, dsets.train.split_idx, dsets.valid.split_idx)\nprint(dsets.new_empty())\ndsets\n\ntorch.Size([1, 570]) torch.Size([2, 1, 570])\nNone 0 1\n(#0) []\n\n\n(#60) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([0])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1])), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), TensorCategory([1]))] ...]\n\n\n\ndsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSClassification()], splits=splits, inplace=True)\n\nidxs = random_choice(len(dsets), 10, False)\ntest_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])\ntest_eq(dsets[idxs][1].numpy(), y_array[idxs])\n\nidxs = random_choice(len(dsets.train), 10, False)\ntest_eq(dsets.train[idxs][0].numpy(), 
X_on_disk[splits[0][idxs]])\ntest_eq(dsets.train[idxs][1].numpy(), y_array[splits[0][idxs]])\n\nidxs = random_choice(len(dsets.valid), 10, False)\ntest_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])\ntest_eq(dsets.valid[idxs][1].numpy(), y_array[splits[1][idxs]])\n\n\ndsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSClassification()], splits=splits, inplace=False)\nassert id(X_on_disk) == id(dsets.tls[0].items) == id(dsets.ptls[0].items)\nassert id(X_on_disk) == id(dsets.train.tls[0].items) == id(dsets.train.ptls[0].items)\nassert id(X_on_disk) == id(dsets.valid.tls[0].items) == id(dsets.valid.ptls[0].items)\n\nidxs = random_choice(len(dsets), 10, False)\ntest_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])\ntest_eq(dsets[idxs][1].numpy(), y_array[idxs])\n\n\nidxs = random_choice(len(dsets.train), 10, False)\ntest_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])\ntest_eq(dsets.train[idxs][1].numpy(), y_array[splits[0][idxs]])\n\nidxs = random_choice(len(dsets.valid), 10, False)\ntest_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])\ntest_eq(dsets.valid[idxs][1].numpy(), y_array[splits[1][idxs]])\n\n\ndsets = TSDatasets(X_on_disk, splits=splits, inplace=True)\n\nidxs = random_choice(len(dsets), 10, False)\ntest_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])\n\nidxs = random_choice(len(dsets.train), 10, False)\ntest_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])\n\nidxs = random_choice(len(dsets.valid), 10, False)\ntest_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])\n\n\ndsets = TSDatasets(X_on_disk, splits=splits, inplace=False)\nassert np.shares_memory(X_on_disk, dsets.tls[0].items)\nassert np.shares_memory(X_on_disk, dsets.ptls[0].items)\nassert np.shares_memory(X_on_disk, dsets.train.tls[0].items)\nassert np.shares_memory(X_on_disk, dsets.train.ptls[0].items)\nassert np.shares_memory(X_on_disk, dsets.valid.tls[0].items)\nassert np.shares_memory(X_on_disk, dsets.valid.ptls[0].items)\n\nidxs = random_choice(len(dsets), 10, False)\ntest_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])\n\nidxs = random_choice(len(dsets.train), 10, False)\ntest_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])\n\nidxs = random_choice(len(dsets.valid), 10, False)\ntest_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])\n\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits, inplace=True)\n\nidxs = random_choice(len(dsets), 10, False)\ntest_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])\ntest_eq(dsets[idxs][1].numpy(), y_array[idxs])\n\nidxs = random_choice(len(dsets.train), 10, False)\ntest_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])\ntest_eq(dsets.train[idxs][1].numpy(), y_array[splits[0][idxs]])\n\nidxs = random_choice(len(dsets.valid), 10, False)\ntest_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])\ntest_eq(dsets.valid[idxs][1].numpy(), y_array[splits[1][idxs]])\n\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits, inplace=False)\nassert np.shares_memory(X_on_disk, dsets.tls[0].items)\nassert np.shares_memory(X_on_disk, dsets.ptls[0].items)\nassert np.shares_memory(X_on_disk, dsets.train.tls[0].items)\nassert np.shares_memory(X_on_disk, dsets.train.ptls[0].items)\nassert np.shares_memory(X_on_disk, dsets.valid.tls[0].items)\nassert np.shares_memory(X_on_disk, dsets.valid.ptls[0].items)\n\nidxs = random_choice(len(dsets), 10, False)\ntest_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])\ntest_eq(dsets[idxs][1].numpy(), y_array[idxs])\n\nidxs = random_choice(len(dsets.train), 
10, False)\ntest_eq(dsets.train[idxs][0].numpy(), X_on_disk[splits[0][idxs]])\ntest_eq(dsets.train[idxs][1].numpy(), y_array[splits[0][idxs]])\n\nidxs = random_choice(len(dsets.valid), 10, False)\ntest_eq(dsets.valid[idxs][0].numpy(), X_on_disk[splits[1][idxs]])\ntest_eq(dsets.valid[idxs][1].numpy(), y_array[splits[1][idxs]])\n\n\ndsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSClassification()], splits=None, inplace=True)\n\nidxs = random_choice(len(dsets), 10, False)\ntest_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])\ntest_eq(dsets[idxs][1].numpy(), y_array[idxs])\n\n\ndsets = TSDatasets(X_on_disk, y_on_disk, tfms=[None, TSClassification()], splits=None, inplace=False)\nassert id(X_on_disk) == id(dsets.tls[0].items) == id(dsets.ptls[0].items)\nassert id(X_on_disk) == id(dsets.train.tls[0].items) == id(dsets.train.ptls[0].items)\n\nidxs = random_choice(len(dsets), 10, False)\ntest_eq(dsets[idxs][0].numpy(), X_on_disk[idxs])\ntest_eq(dsets[idxs][1].numpy(), y_array[idxs])\n\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)\ntest_eq(dsets.train[0:10], dsets.add_dataset(X_on_disk[0:10], y_array[0:10])[:])\ntest_eq(dsets.train[0:10][0], dsets.add_dataset(X_on_disk[0:10])[:][0])\n\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)\ntorch.save(dsets, 'export/dsets.pth')\ndel dsets\ndsets = torch.load('export/dsets.pth')\ndsets\n\n(#60) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1))] ...]\n\n\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)\ntorch.save(dsets.train, 'export/dsets.pth')\ndel dsets\ndsets = torch.load('export/dsets.pth')\ndsets\n\n(#30) [(TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(0)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1)), (TSTensor(vars:1, len:570, device=cpu, dtype=torch.float32), tensor(1))] ...]\n\n\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)\ntest_eq(len(dsets.train), len(X_train))\ndsets = TSDatasets(X_on_disk, y_array, tfms=None, splits=splits)\ntest_eq(len(dsets.train), len(X_train))\ndsets = TSDatasets(X_on_disk, y_array, tfms=[add(1), TSCategorize()], splits=splits)\ntest_eq(len(dsets.train), len(X_train))\n# test_eq(dsets.train[0][0].data, tensor(X_train[0] + 1))\ntest_eq(dsets.train[0][1].item(), y_tensor[0])\n\n\ndsets = TSDatasets(X_on_disk, y_on_disk, 
tfms=[None, TSCategorize()], splits=splits)\ntest_eq(len(dsets.add_test(X_train, y_train)), len(X_train))\ntest_eq(len(dsets.add_unlabeled(X_train)), len(X_train))\n\n\nX_tensor = torch.randn(100, 4, 50)\ny_tensor = torch.randint(0, 2, size=(len(X_tensor),))\ntensor_splits = (np.arange(80), np.arange(80, 100))\ndsets = TSDatasets(X_tensor, y_tensor, tfms=[None, TSClassification()], splits=tensor_splits)\ntest_eq(type(dsets[0][0]), TSTensor)\n\n\nsource\n\n\nTSDataLoader\n\n TSDataLoader (dataset, bs=64, shuffle=False, drop_last=False,\n num_workers=0, verbose=False, do_setup=True, vocab=None,\n sort=False, weights=None, partial_n=None, sampler=None,\n pin_memory=False, timeout=0, batch_size=None, indexed=None,\n n=None, device=None, persistent_workers=False,\n pin_memory_device='', wif=None, before_iter=None,\n after_item=None, before_batch=None, after_batch=None,\n after_iter=None, create_batches=None, create_item=None,\n create_batch=None, retain=None, get_idxs=None, sample=None,\n shuffle_fn=None, do_batch=None)\n\nTransformed DataLoader\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndataset\n\n\nMap- or iterable-style dataset from which to load the data\n\n\nbs\nint\n64\nSize of batch\n\n\nshuffle\nbool\nFalse\nWhether to shuffle data\n\n\ndrop_last\nbool\nFalse\n\n\n\nnum_workers\nint\nNone\nNumber of CPU cores to use in parallel (default: All available up to 16)\n\n\nverbose\nbool\nFalse\nWhether to print verbose logs\n\n\ndo_setup\nbool\nTrue\nWhether to run setup() for batch transform(s)\n\n\nvocab\nNoneType\nNone\n\n\n\nsort\nbool\nFalse\n\n\n\nweights\nNoneType\nNone\n\n\n\npartial_n\nNoneType\nNone\n\n\n\nsampler\nNoneType\nNone\n\n\n\npin_memory\nbool\nFalse\n\n\n\ntimeout\nint\n0\n\n\n\nbatch_size\nNoneType\nNone\n\n\n\nindexed\nNoneType\nNone\n\n\n\nn\nNoneType\nNone\n\n\n\ndevice\nNoneType\nNone\n\n\n\npersistent_workers\nbool\nFalse\n\n\n\npin_memory_device\nstr\n\n\n\n\nwif\nNoneType\nNone\n\n\n\nbefore_iter\nNoneType\nNone\n\n\n\nafter_item\nNoneType\nNone\n\n\n\nbefore_batch\nNoneType\nNone\n\n\n\nafter_batch\nNoneType\nNone\n\n\n\nafter_iter\nNoneType\nNone\n\n\n\ncreate_batches\nNoneType\nNone\n\n\n\ncreate_item\nNoneType\nNone\n\n\n\ncreate_batch\nNoneType\nNone\n\n\n\nretain\nNoneType\nNone\n\n\n\nget_idxs\nNoneType\nNone\n\n\n\nsample\nNoneType\nNone\n\n\n\nshuffle_fn\nNoneType\nNone\n\n\n\ndo_batch\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nNumpyDataLoader\n\n NumpyDataLoader (dataset, bs=64, shuffle=False, drop_last=False,\n num_workers=0, verbose=False, do_setup=True, vocab=None,\n sort=False, weights=None, partial_n=None, sampler=None,\n pin_memory=False, timeout=0, batch_size=None,\n indexed=None, n=None, device=None,\n persistent_workers=False, pin_memory_device='',\n wif=None, before_iter=None, after_item=None,\n before_batch=None, after_batch=None, after_iter=None,\n create_batches=None, create_item=None,\n create_batch=None, retain=None, get_idxs=None,\n sample=None, shuffle_fn=None, do_batch=None)\n\nTransformed DataLoader\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ndataset\n\n\nMap- or iterable-style dataset from which to load the data\n\n\nbs\nint\n64\nSize of batch\n\n\nshuffle\nbool\nFalse\nWhether to shuffle data\n\n\ndrop_last\nbool\nFalse\n\n\n\nnum_workers\nint\nNone\nNumber of CPU cores to use in parallel (default: All available up to 16)\n\n\nverbose\nbool\nFalse\nWhether to print verbose logs\n\n\ndo_setup\nbool\nTrue\nWhether to run setup() for batch 
transform(s)\n\n\nvocab\nNoneType\nNone\n\n\n\nsort\nbool\nFalse\n\n\n\nweights\nNoneType\nNone\n\n\n\npartial_n\nNoneType\nNone\n\n\n\nsampler\nNoneType\nNone\n\n\n\npin_memory\nbool\nFalse\n\n\n\ntimeout\nint\n0\n\n\n\nbatch_size\nNoneType\nNone\n\n\n\nindexed\nNoneType\nNone\n\n\n\nn\nNoneType\nNone\n\n\n\ndevice\nNoneType\nNone\n\n\n\npersistent_workers\nbool\nFalse\n\n\n\npin_memory_device\nstr\n\n\n\n\nwif\nNoneType\nNone\n\n\n\nbefore_iter\nNoneType\nNone\n\n\n\nafter_item\nNoneType\nNone\n\n\n\nbefore_batch\nNoneType\nNone\n\n\n\nafter_batch\nNoneType\nNone\n\n\n\nafter_iter\nNoneType\nNone\n\n\n\ncreate_batches\nNoneType\nNone\n\n\n\ncreate_item\nNoneType\nNone\n\n\n\ncreate_batch\nNoneType\nNone\n\n\n\nretain\nNoneType\nNone\n\n\n\nget_idxs\nNoneType\nNone\n\n\n\nsample\nNoneType\nNone\n\n\n\nshuffle_fn\nNoneType\nNone\n\n\n\ndo_batch\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nTSDataLoaders\n\n TSDataLoaders (*loaders, path='.', device=None)\n\nBasic wrapper around several DataLoaders.\n\nsource\n\n\nNumpyDataLoaders\n\n NumpyDataLoaders (*loaders, path='.', device=None)\n\nBasic wrapper around several DataLoaders.\n\nsource\n\n\nStratifiedSampler\n\n StratifiedSampler (y, bs:int=64, shuffle:bool=False,\n drop_last:bool=False)\n\nSampler where batches preserve the percentage of samples for each class\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\ny\n\n\nThe target variable for supervised learning problems. Stratification is done based on the y labels.\n\n\nbs\nint\n64\nBatch size\n\n\nshuffle\nbool\nFalse\nFlag to shuffle each class’s samples before splitting into batches.\n\n\ndrop_last\nbool\nFalse\nFlag to drop the last incomplete batch.\n\n\n\n\na = np.concatenate([np.zeros(90), np.ones(10)])\nsampler = StratifiedSampler(a, bs=32, shuffle=True, drop_last=True)\nidxs = np.array(list(iter(sampler)))\nprint(idxs[:32])\nprint(a[idxs][:32])\ntest_eq(a[idxs][:32].mean(), .1)\n\n[[ 0 2 8 17 18 21 27 29 34 38 39 43 45 48 52 54 55 60 61 63 66 67 68 69\n 71 73 78 80 81 84 90 92 95 99 1 6 11 12 15 16 20 23 24 28 30 33 36 37\n 40 41 42 44 49 59 62 64 65 74 75 76 77 79 86 87 91 93 96 3 4 5 7 9\n 10 13 14 19 22 25 26 31 32 35 46 47 50 51 53 56 57 58 70 72 82 83 85 88\n 89 94 97 98]]\n[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 1. 1. 
1.]]\n\n\n\nsource\n\n\nget_c\n\n get_c (dls)\n\n\nsource\n\n\nget_best_dls_params\n\n get_best_dls_params (dls, n_iters=10, num_workers=[0, 1, 2, 4, 8],\n pin_memory=[True, False], prefetch_factor=[2, 4, 8],\n return_best=True, verbose=True)\n\n\nsource\n\n\nget_best_dl_params\n\n get_best_dl_params (dl, n_iters=10, num_workers=[0, 1, 2, 4, 8],\n pin_memory=[True, False], prefetch_factor=[2, 4, 8],\n return_best=True, verbose=True)\n\n\nsource\n\n\nget_ts_dls\n\n get_ts_dls (X, y=None, splits=None, sel_vars=None, sel_steps=None,\n tfms=None, inplace=True, path='.', bs=64, batch_tfms=None,\n num_workers=0, device=None, shuffle_train=True,\n drop_last=True, weights=None, partial_n=None, sampler=None,\n sort=False, **kwargs)\n\n\n# Tests\na = np.arange(10)\n\nfor s in [None, np.arange(10), np.arange(10).tolist(), L(np.arange(10).tolist()), (np.arange(10).tolist(), None), (np.arange(10).tolist(), L())]:\n test_eq(_check_splits(a, s), (L(np.arange(10).tolist()), L()))\n\n\nsource\n\n\nget_subset_dl\n\n get_subset_dl (dl, idxs)\n\n\nsource\n\n\nget_ts_dl\n\n get_ts_dl (X, y=None, split=None, sel_vars=None, sel_steps=None,\n tfms=None, inplace=True, path='.', bs=64, batch_tfms=None,\n num_workers=0, device=None, shuffle_train=True,\n drop_last=True, weights=None, partial_n=None, sampler=None,\n sort=False, **kwargs)\n\n\nX, y, splits = get_UCR_data(dsid, on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=8)\ndls = get_best_dls_params(dls, prefetch_factor=[2, 4, 8, 16])\n\n\nDataloader 0\n\n num_workers: 0 pin_memory: True prefetch_factor: 2 - time: 1.400 ms/iter\n num_workers: 0 pin_memory: False prefetch_factor: 2 - time: 0.620 ms/iter\n\n best dl params:\n best num_workers : 0\n best pin_memory : False\n best prefetch_factor: 2\n return_best : True\n\n\n\nDataloader 1\n\n num_workers: 0 pin_memory: True prefetch_factor: 2 - time: 0.261 ms/iter\n num_workers: 0 pin_memory: False prefetch_factor: 2 - time: 0.306 ms/iter\n\n best dl params:\n best num_workers : 0\n best pin_memory : True\n best prefetch_factor: 2\n return_best : True\n\n\n\n\n\ny_int = np.random.randint(0, 4, size=len(X))\ndls = get_ts_dls(X, y_int, splits=splits, bs=8)\ntest_eq(hasattr(dls, \"vocab\"), False)\n\ndls = get_ts_dls(X, y_int, splits=splits, bs=8, vocab=[0,1,2,3])\ntest_eq(dls.vocab, [0,1,2,3])\ntest_eq(dls.c, 4)\ntest_eq(dls.cat, True)\n\n\nX, y, splits = get_UCR_data(dsid, on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=8)\nb=first(dls.train)\ndls.decode(b)\ntest_eq(X.shape[1], dls.vars)\ntest_eq(X.shape[-1], dls.len)\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=64, inplace=True)\n\nidxs = random_choice(len(dls.valid_ds), 10, False)\nnew_dl = get_subset_dl(dls.train, idxs)\n\nidxs = random_choice(len(dls.valid_ds), 10, False)\nnew_dl = get_subset_dl(dls.valid, idxs)\ntest_eq(new_dl.one_batch()[0].cpu().numpy(), X[splits[1][idxs]])\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\nweights = np.random.rand(len(X))\ndls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=64, inplace=True, weights=weights)\nweights2 = weights[splits[0]] / weights[splits[0]].sum()\ntest_eq(dls.train.weights, weights2)\ntest_eq(dls.valid.weights, None)\n\n\npartial_n = 12\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, 
splits=splits, tfms=[None, TSClassification()], bs=64, inplace=True, partial_n=partial_n)\ntest_eq(len(dls.train.one_batch()[0]), partial_n)\n\npartial_n = .1\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSClassification()], bs=64, inplace=True, partial_n=partial_n)\ntest_eq(len(dls.train.one_batch()[0]), int(round(len(dls.train.dataset) * partial_n)))\n\nYou’ll now be able to pass a sampler to a tsai dataloader.\nYou should use a sampler for the train set and a sampler for the validation set. You’ll need to pass an object with the same length as each dataset. For example, the splits like in the case below.\n⚠️ Remember to set shuffle=False when using a sampler since they a mutually exclusive. This means that when you use a sampler, you always need to set the shuffle in the dataloader to False. The sampler will control whether the indices are shuffled or not (you can set shuffle to True or False in the sampler).\ndrop_last is managed in the dataloder though.\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ntrain_sampler = torch.utils.data.sampler.RandomSampler(splits[0])\nvalid_sampler = torch.utils.data.sampler.SequentialSampler(splits[1])\ndls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()], bs=8, inplace=True,\n shuffle=False, drop_last=True, sampler=[train_sampler, valid_sampler])\nprint('train')\nfor _ in dls.train:\n print(dls.train.idxs)\nprint('valid')\nfor _ in dls.valid:\n print(dls.valid.idxs)\n\ntrain\n[22, 25, 16, 3, 26, 28, 7, 18]\n[5, 4, 12, 27, 29, 24, 9, 11]\n[0, 2, 8, 17, 21, 20, 23, 10]\nvalid\n[0, 1, 2, 3, 4, 5, 6, 7]\n[8, 9, 10, 11, 12, 13, 14, 15]\n[16, 17, 18, 19, 20, 21, 22, 23]\n[24, 25, 26, 27, 28, 29]\n\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ntrain_sampler = torch.utils.data.sampler.SequentialSampler(splits[0])\nvalid_sampler = torch.utils.data.sampler.SequentialSampler(splits[1])\ndls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()], bs=64, inplace=True,\n shuffle=False, sampler=[train_sampler, valid_sampler])\ntest_eq(dls.get_idxs(), np.arange(len(splits[0])))\ntest_eq(dls.train.get_idxs(), np.arange(len(splits[0])))\ntest_eq(dls.valid.get_idxs(), np.arange(len(splits[1])))\nxb = dls.valid.one_batch()[0].cpu().numpy()\ntest_close(xb, X[dls.valid.split_idxs])\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ntrain_sampler = torch.utils.data.sampler.RandomSampler(splits[0])\nvalid_sampler = torch.utils.data.sampler.SequentialSampler(splits[0])\ndls = get_ts_dls(X, y, splits=splits, tfms=[None, TSClassification()], bs=32, inplace=True,\n shuffle=False, drop_last=True, sampler=[train_sampler, valid_sampler])\ntest_ne(dls.train.get_idxs(), np.arange(len(splits[0])))\ntest_eq(np.sort(dls.train.get_idxs()), np.arange(len(splits[0])))\ntest_eq(dls.valid.get_idxs(), np.arange(len(splits[1])))\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=64, inplace=False)\n\nidxs = random_choice(len(dls.valid_ds), 10, False)\nnew_dl = get_subset_dl(dls.train, idxs)\n\nidxs = random_choice(len(dls.valid_ds), 10, False)\nnew_dl = get_subset_dl(dls.valid, idxs)\ntest_eq(new_dl.one_batch()[0].cpu().numpy(), X[splits[1][idxs]])\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSClassification()], splits=splits, bs=8)\nb = 
dls.one_batch()\ninput_idxs = dls.input_idxs\ntest_eq(b[0].cpu().numpy(), X[input_idxs])\nb = dls.train.one_batch()\ninput_idxs = dls.train.input_idxs\ntest_eq(b[0].cpu().numpy(), X[input_idxs])\nassert max(input_idxs) < len(splits[0])\nb = dls.valid.one_batch()\ninput_idxs = dls.valid.input_idxs\ntest_eq(b[0].cpu().numpy(), X[input_idxs])\nassert min(input_idxs) >= len(splits[0])\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, bs=8)\nb=first(dls.train)\ndls.decode(b)\ntest_eq(X.shape[1], dls.vars)\ntest_eq(X.shape[-1], dls.len)\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, bs=8, weights=np.random.randint(0, 3, len(y)))\nb=first(dls.train)\ndls.decode(b)\ntest_eq(X.shape[1], dls.vars)\ntest_eq(X.shape[-1], dls.len)\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndsets = TSDatasets(X, y, tfms=[None, TSCategorize()], splits=splits)\nts_dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, device=default_device(), bs=4)\ntorch.save(ts_dls, 'export/ts_dls.pth')\ndel ts_dls\nts_dls = torch.load('export/ts_dls.pth')\nfor xb,yb in ts_dls.train:\n test_eq(tensor(X[ts_dls.train.idxs]), xb.cpu())\n\n\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, bs=4)\nfor xb,yb in dls.train:\n test_eq(xb.cpu().numpy(), X[dls.train.input_idxs])\nfor xb,yb in dls.valid:\n test_eq(xb.cpu().numpy(), X[dls.valid.input_idxs])\n\n\ntest_eq((ts_dls.train.shuffle, ts_dls.valid.shuffle, ts_dls.train.drop_last, ts_dls.valid.drop_last), (True, False, True, False))\n\n\ndsid = 'OliveOil'\nX, y, splits = get_UCR_data(dsid, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, bs=8, num_workers=0)\nxb, yb = first(dls.train)\ntest_eq(tensor(X[dls.train.idxs]), xb.cpu())\n\n\ntest_eq((dls.train.shuffle, dls.valid.shuffle, dls.train.drop_last, dls.valid.drop_last), (True, False, True, False))\n\n\n# multiclass\ndsid = 'OliveOil'\nX, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)\ndls = get_ts_dls(X, y, tfms=[None, TSCategorize()], splits=splits, inplace=True)\ndls.show_dist()\ndls.train.show_dist()\nxb,yb = first(dls.train)\ntest_eq((dls.cat, dls.c), (True, 4))\ntest_ne(dls.cws.cpu().numpy(), None)\ndls.decoder((xb, ))\ndls.decoder((xb[0], ))\ndls.decoder((xb, yb))\ndls.decoder((xb[0], yb[0]))\ndls.decoder(yb)\ndls.decoder(yb[0])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'1'\n\n\n\nnew_dl = dls.new_dl(X)\nfirst(new_dl)\n\n(TSTensor(samples:60, vars:1, len:570, device=cpu, dtype=torch.float32),)\n\n\n\nnew_dl = dls.new_dl(X, y=y)\nfirst(new_dl)\n\n(TSTensor(samples:60, vars:1, len:570, device=cpu, dtype=torch.float32),\n TensorCategory([2, 3, 2, 2, 0, 1, 1, 3, 3, 1, 2, 0, 0, 3, 0, 1, 0, 3, 3, 3, 1,\n 3, 3, 3, 3, 3, 0, 3, 1, 1, 3, 3, 2, 3, 3, 3, 1, 1, 3, 2, 3, 0,\n 3, 0, 3, 1, 1, 2, 1, 1, 1, 3, 3, 1, 2, 1, 1, 3, 0, 0]))\n\n\n\ndls.train.dataset.split_idxs, dls.train.dataset.splits, dls.valid.split_idxs\n\n(array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,\n 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], dtype=int8),\n (#30) [0,1,2,3,4,5,6,7,8,9...],\n array([30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,\n 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59], dtype=int8))\n\n\n\n# 2d input array and tfms == None return a NoTfmLists 
object\nX, y, splits = get_UCR_data('OliveOil', on_disk=False, split_data=False)\nX = X[:, 0]\ntfms=[None, TSCategorize()]\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, bs=8)\ntest_eq(1, dls.vars)\ntest_eq(X.shape[-1], dls.len)\ntest_eq(type(dls.tls[0]).__name__, 'NoTfmLists')\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, bs=8, inplace=False)\ntest_eq(1, dls.vars)\ntest_eq(X.shape[-1], dls.len)\ntest_eq(type(dls.tls[0]).__name__, 'NoTfmLists')\n\n\n# regression\ndsid = 'OliveOil'\nX, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)\ndls = get_ts_dls(X, np.random.rand(60, ), tfms=[None, ToNumpyTensor], splits=splits)\ndls.show_dist()\ndls.train.show_dist()\nxb,yb = first(dls.train)\ndls.decoder((xb, ))\ndls.decoder((xb[0], ))\ndls.decoder((xb, yb))\ndls.decoder((xb[0], yb[0]))\ndls.decoder(yb)\ndls.decoder(yb[0])\ntest_eq((dls.cat, dls.c), (False, 1))\ntest_eq(dls.cws, None)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# regression, multilabel\ndsid = 'OliveOil'\nX, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)\ndls = get_ts_dls(X, np.random.rand(60, 3) * 5, tfms=[None, ToNumpyTensor], splits=splits)\ndls.show_dist()\ndls.train.show_dist()\nxb,yb = first(dls.train)\ndls.decoder((xb, ))\ndls.decoder((xb[0], ))\ndls.decoder((xb, yb))\ndls.decoder((xb[0], yb[0]))\ndls.decoder(yb)\ndls.decoder(yb[0])\ntest_eq((dls.cat, dls.c, dls.d),(False, 1, 3))\ntest_eq(dls.cws, None)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# multiclass, multilabel\ndsid = 'OliveOil'\nX, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)\ncm = {\n '1':'A',\n '2':['B', 'C'],\n '3':['B', 'D'] ,\n '4':'E',\n }\nkeys = cm.keys()\nnew_cm = {k:v for k,v in zip(keys, [listify(v) for v in cm.values()])}\ny_multi = np.array([new_cm[yi] if yi in keys else listify(yi) for yi in y], dtype=object)\ndls = get_ts_dls(X, y_multi, tfms=[None, TSMultiLabelClassification()], splits=splits)\ndls.show_dist()\ndls.train.show_dist()\nxb,yb = first(dls.train)\ndls.decoder((xb, ))\ndls.decoder((xb[0], ))\ndls.decoder((xb, yb))\ndls.decoder((xb[0], yb[0]))\ndls.decoder(yb)\ndls.decoder(yb[0])\ntest_eq((dls.cat, dls.c), (True, 5))\ntest_ne(dls.cws.cpu().numpy(), None)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndsid = 'OliveOil'\nX, y, splits = get_UCR_data(dsid, on_disk=True, split_data=False)\ncm = {\n '1':'A',\n '2':['B', 'C'],\n '3':['B', 'D'] ,\n '4':'E',\n }\nkeys = cm.keys()\nnew_cm = {k:v for k,v in zip(keys, [listify(v) for v in cm.values()])}\ny_multi = np.array([new_cm[yi] if yi in keys else listify(yi) for yi in y], dtype=object)\ndls = get_ts_dls(X, y_multi, tfms=[None, TSMultiLabelClassification()], splits=splits)\ntest_eq(dls.new(X[0]).one_batch().shape, (1, 570))\ntest_eq(dls.new(X[:15]).one_batch().shape, (15, 1, 570))\ntest_eq(dls.train.new(X[0]).one_batch().shape, (1, 570))\ntest_eq(dls.valid.new(X[:15]).one_batch().shape, (15, 1, 570))\n\n\nbs = 25\ndsets = TSDatasets(X, y, tfms=[None, TSCategorize()], splits=splits)\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs*2], batch_tfms=add(1), num_workers=0)\nxb,yb = dls.train.one_batch()\ntest_eq(xb.cpu().data, tensor(X_on_disk[splits[0]][dls.train.idxs]) + 1)\n\n\ndsets = TSDatasets(X, y, tfms=[None, TSCategorize()], splits=splits)\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs*2])\nxb,yb = dls.train.one_batch()\ntest_eq(xb.shape, (min(bs, len(splits[0])), X.shape[1], X.shape[-1]))\nit = iter(dls.valid)\nfor xb,yb in it:\n test_close(xb.cpu(), TSTensor(X[splits[1]][dls.valid.idxs]))\n\n\nbs = 64\ndsets = TSDatasets(X, y, 
tfms=[add(1), TSCategorize()], splits=RandomSplitter(valid_pct=.3)(y_array))\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs*2])\nxb,yb = dls.train.one_batch()\ntest_eq(xb.shape, (min(bs, len(dsets.train)), X_on_disk.shape[1], X_on_disk.shape[-1]))\nxb,yb = dls.valid.one_batch()\ntest_eq(xb.shape, (min(bs*2, len(dsets.valid)), X_on_disk.shape[1], X_on_disk.shape[-1]))\n\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=[None, TSCategorize()], splits=splits)\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[32, 64])\nfor i in range(10):\n dl = dls.train if random.random() < .5 else dls.valid\n xb,yb = dl.one_batch()\n torch.equal(xb.cpu(), TSTensor(X_on_disk[dl.input_idxs]))\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=[None, TSCategorize()])\ndls = TSDataLoaders.from_dsets(dsets, bs=32)\nfor i in range(10):\n xb,yb = dls.one_batch()\n torch.equal(xb.cpu(), TSTensor(X_on_disk[dl.input_idxs]))\n\ndsets = TSDatasets(X_on_disk, tfms=None)\ndls = TSDataLoaders.from_dsets(dsets, bs=32)\nfor i in range(10):\n xb = dls.one_batch()\n torch.equal(xb[0].cpu(), TSTensor(X_on_disk[dl.input_idxs]))\n\n\ndsets = TSDatasets(X_on_disk, y_array, tfms=[None, TSCategorize()])\ndls = TSDataLoaders.from_dsets(dsets, bs=32)\ntest_eq(dls.split_idxs, L(np.arange(len(X_on_disk)).tolist()))\n\n\nX, y, splits = get_UCR_data('NATOPS', return_split=False)\ntfms = [None, [TSCategorize()]]\ndls = get_ts_dls(X, y, tfms=tfms, splits=splits, bs=[64, 128])\ndls.show_batch()\ndls.show_dist()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# test passing a list with categories instead of a numpy array\ndsid = 'NATOPS'\nbs = 64\nX2, y2, splits2 = get_UCR_data(dsid, return_split=False)\nvocab = sorted(set(y))\ntfms = [None, [TSCategorize(vocab=vocab)]]\ndsets = TSDatasets(X2, y2, tfms=tfms, splits=splits2)\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=[bs, bs*2])\ndls.train.one_batch()\n\n(TSTensor(samples:64, vars:24, len:51, device=cpu, dtype=torch.float32),\n TensorCategory([0, 3, 0, 5, 0, 0, 5, 3, 3, 1, 2, 0, 0, 2, 5, 2, 2, 4, 5, 3, 2,\n 4, 2, 1, 1, 0, 1, 2, 0, 4, 4, 4, 4, 2, 0, 0, 3, 3, 0, 5, 4, 3,\n 2, 5, 5, 2, 2, 4, 3, 0, 2, 4, 4, 5, 5, 0, 5, 3, 2, 1, 0, 3, 4,\n 2]))\n\n\n\n# MultiCategory\nbs = 64\nn_epochs = 100\ntfms = [None, [MultiCategorize()]]\ndsets = TSDatasets(X2, y2, tfms=tfms, splits=splits2)\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=bs)\ndls.train.one_batch()\n\n(TSTensor(samples:64, vars:24, len:51, device=cpu, dtype=torch.float32),\n TensorMultiCategory([[7, 0, 1],\n [4, 0, 1],\n [7, 0, 1],\n [5, 0, 1],\n [2, 0, 1],\n [2, 0, 1],\n [2, 0, 1],\n [7, 0, 1],\n [5, 0, 1],\n [3, 0, 1],\n [6, 0, 1],\n [7, 0, 1],\n [3, 0, 1],\n [6, 0, 1],\n [7, 0, 1],\n [7, 0, 1],\n [6, 0, 1],\n [7, 0, 1],\n [5, 0, 1],\n [3, 0, 1],\n [3, 0, 1],\n [7, 0, 1],\n [7, 0, 1],\n [2, 0, 1],\n [4, 0, 1],\n [4, 0, 1],\n [2, 0, 1],\n [4, 0, 1],\n [6, 0, 1],\n [2, 0, 1],\n [2, 0, 1],\n [5, 0, 1],\n [2, 0, 1],\n [5, 0, 1],\n [4, 0, 1],\n [7, 0, 1],\n [2, 0, 1],\n [3, 0, 1],\n [4, 0, 1],\n [6, 0, 1],\n [2, 0, 1],\n [7, 0, 1],\n [2, 0, 1],\n [3, 0, 1],\n [4, 0, 1],\n [5, 0, 1],\n [5, 0, 1],\n [2, 0, 1],\n [5, 0, 1],\n [2, 0, 1],\n [3, 0, 1],\n [5, 0, 1],\n [6, 0, 1],\n [7, 0, 1],\n [5, 0, 1],\n [2, 0, 1],\n [7, 0, 1],\n [4, 0, 1],\n [5, 0, 1],\n [6, 0, 1],\n [7, 0, 1],\n [4, 0, 1],\n [7, 0, 1],\n [3, 0, 1]]))\n\n\nThe combination of splits, sel_vars and sel_steps is very powerful, as it allows you to perform advanced indexing of the array-like X.\n\nfrom tsai.data.validation import 
TSSplitter\n\n\nX = np.arange(16*5*50).reshape(16,5,50)\ny = alphabet[np.random.randint(0,3, 16)]\nsplits = TSSplitter(show_plot=False)(y)\ntfms = [None, TSCategorize()]\nbatch_tfms = None\ndls = get_ts_dls(X, y, splits=splits, sel_vars=[0, 1, 3], sel_steps=slice(-10, None), tfms=tfms, batch_tfms=batch_tfms)\nxb,yb=dls.train.one_batch()\ntest_close(X[dls.input_idxs][:, [0, 1, 3]][...,slice(-10, None)], xb.cpu().numpy())\nnew_dl = dls.train.new_dl(X[:5], y[:5])\nprint(new_dl.one_batch())\nnew_empty_dl = dls.new_empty() # when exported\ndl = new_empty_dl.new_dl(X[:10], y[:10], bs=64) # after export\ndl.one_batch()\n\n(TSTensor(samples:5, vars:3, len:10, device=cpu, dtype=torch.int64), TensorCategory([2, 2, 2, 2, 2]))\n\n\n(TSTensor(samples:10, vars:3, len:10, device=cpu, dtype=torch.int64),\n TensorCategory([2, 2, 2, 0, 2, 2, 0, 2, 1, 1]))\n\n\n\nsource\n\n\nget_dl_percent_per_epoch\n\n get_dl_percent_per_epoch (dl, model, n_batches=None)\n\n\nsource\n\n\nget_time_per_batch\n\n get_time_per_batch (dl, model=None, n_batches=None)\n\n\nX, y, splits = get_UCR_data('NATOPS', split_data=False)\ntfms = [None, [TSCategorize()]]\ndls = get_ts_dls(X, y, tfms=tfms, splits=splits)\ntrain_dl = dls.train\nxb, _ = train_dl.one_batch()\nmodel = nn.Linear(xb.shape[-1], 2).to(xb.device)\nt = get_dl_percent_per_epoch(train_dl, model, n_batches=10)\nprint(t)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n93.70%", + "crumbs": [ + "Data", + "Data Core" + ] + }, + { + "objectID": "models.xresnet1d.html", + "href": "models.xresnet1d.html", + "title": "XResNet1d", + "section": "", + "text": "This is a modified version of fastai’s XResNet model in github\n\n\nsource\n\nxresnet1d50_deeper\n\n xresnet1d50_deeper (c_in, c_out, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01, dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d34_deeper\n\n xresnet1d34_deeper (c_in, c_out, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01, dilation:Union[int,Tuple[int,int]]=1,\n 
padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d18_deeper\n\n xresnet1d18_deeper (c_in, c_out, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True,\n transpose=False, init='auto', xtra=None,\n bias_std=0.01, dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d50_deep\n\n xresnet1d50_deep (c_in, c_out, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None, dw=False,\n g2=1, sa=False, sym=False, norm_type=<NormType.Batch:\n 1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,\n ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, 
dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d34_deep\n\n xresnet1d34_deep (c_in, c_out, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None, dw=False,\n g2=1, sa=False, sym=False, norm_type=<NormType.Batch:\n 1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,\n ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d18_deep\n\n xresnet1d18_deep (c_in, c_out, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1,\n groups=1, reduction=None, nh1=None, nh2=None, dw=False,\n g2=1, sa=False, sym=False, norm_type=<NormType.Batch:\n 1>, act_cls=<class 'torch.nn.modules.activation.ReLU'>,\n ndim=2, ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, 
int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d152\n\n xresnet1d152 (c_in, c_out, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,\n reduction=None, nh1=None, nh2=None, dw=False, g2=1,\n sa=False, sym=False, norm_type=<NormType.Batch: 1>,\n act_cls=<class 'torch.nn.modules.activation.ReLU'>, ndim=2,\n ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d101\n\n xresnet1d101 (c_in, c_out, act=<class\n 'torch.nn.modules.activation.ReLU'>, stride=1, groups=1,\n reduction=None, nh1=None, nh2=None, dw=False, g2=1,\n sa=False, sym=False, norm_type=<NormType.Batch: 1>,\n act_cls=<class 'torch.nn.modules.activation.ReLU'>, ndim=2,\n ks=3, pool=<function AvgPool>, pool_first=True,\n padding=None, bias=None, bn_1st=True, transpose=False,\n init='auto', xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d50\n\n xresnet1d50 (c_in, c_out, act=<class 'torch.nn.modules.activation.ReLU'>,\n stride=1, groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True, padding=None,\n bias=None, bn_1st=True, transpose=False, init='auto',\n xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, 
dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d34\n\n xresnet1d34 (c_in, c_out, act=<class 'torch.nn.modules.activation.ReLU'>,\n stride=1, groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True, padding=None,\n bias=None, bn_1st=True, transpose=False, init='auto',\n xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nsource\n\n\nxresnet1d18\n\n xresnet1d18 (c_in, c_out, act=<class 'torch.nn.modules.activation.ReLU'>,\n stride=1, groups=1, reduction=None, nh1=None, nh2=None,\n dw=False, g2=1, sa=False, sym=False,\n norm_type=<NormType.Batch: 1>, act_cls=<class\n 'torch.nn.modules.activation.ReLU'>, ndim=2, ks=3,\n pool=<function AvgPool>, pool_first=True, padding=None,\n bias=None, bn_1st=True, transpose=False, init='auto',\n xtra=None, bias_std=0.01,\n dilation:Union[int,Tuple[int,int]]=1,\n padding_mode:str='zeros', device=None, dtype=None)\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\n\n\n\n\n\nc_out\n\n\n\n\n\nact\ntype\nReLU\n\n\n\nstride\nint\n1\n\n\n\ngroups\nint\n1\n\n\n\nreduction\nNoneType\nNone\n\n\n\nnh1\nNoneType\nNone\n\n\n\nnh2\nNoneType\nNone\n\n\n\ndw\nbool\nFalse\n\n\n\ng2\nint\n1\n\n\n\nsa\nbool\nFalse\n\n\n\nsym\nbool\nFalse\n\n\n\nnorm_type\nNormType\nNormType.Batch\n\n\n\nact_cls\ntype\nReLU\n\n\n\nndim\nint\n2\n\n\n\nks\nint\n3\n\n\n\npool\nfunction\nAvgPool\n\n\n\npool_first\nbool\nTrue\n\n\n\npadding\nNoneType\nNone\n\n\n\nbias\nNoneType\nNone\n\n\n\nbn_1st\nbool\nTrue\n\n\n\ntranspose\nbool\nFalse\n\n\n\ninit\nstr\nauto\n\n\n\nxtra\nNoneType\nNone\n\n\n\nbias_std\nfloat\n0.01\n\n\n\ndilation\ntyping.Union[int, typing.Tuple[int, 
int]]\n1\n\n\n\npadding_mode\nstr\nzeros\nTODO: refine this type\n\n\ndevice\nNoneType\nNone\n\n\n\ndtype\nNoneType\nNone\n\n\n\n\n\nbs, c_in, seq_len = 2, 4, 32\nc_out = 2\nx = torch.rand(bs, c_in, seq_len)\narchs = [\n xresnet1d18, xresnet1d34, xresnet1d50, \n xresnet1d18_deep, xresnet1d34_deep, xresnet1d50_deep, xresnet1d18_deeper,\n xresnet1d34_deeper, xresnet1d50_deeper\n# # Long test\n# xresnet1d101, xresnet1d152,\n]\nfor i, arch in enumerate(archs):\n print(i, arch.__name__)\n test_eq(arch(c_in, c_out, sa=True, act=Mish)(x).shape, (bs, c_out))\n\n0 xresnet1d18\n1 xresnet1d34\n2 xresnet1d50\n3 xresnet1d18_deep\n4 xresnet1d34_deep\n5 xresnet1d50_deep\n6 xresnet1d18_deeper\n7 xresnet1d34_deeper\n8 xresnet1d50_deeper\n\n\n\nm = xresnet1d34(4, 2, act=Mish)\ntest_eq(len(get_layers(m, is_bn)), 38)\ntest_eq(check_weight(m, is_bn)[0].sum(), 22)", + "crumbs": [ + "Models", + "CNNs", + "XResNet1d" + ] + }, + { + "objectID": "tslearner.html", + "href": "tslearner.html", + "title": "TSLearner", + "section": "", + "text": "New set of time series learners with a new sklearn-like API that simplifies the learner creation. The following classes are included:", + "crumbs": [ + "Training", + "TSLearner" + ] + }, + { + "objectID": "tslearner.html#tsclassifier-api", + "href": "tslearner.html#tsclassifier-api", + "title": "TSLearner", + "section": "TSClassifier API", + "text": "TSClassifier API\n\nCommonly used arguments:\n\nX: array-like of shape (n_samples, n_steps) or (n_samples, n_features, n_steps) with the input time series samples. Internally, they will be converted to torch tensors.\ny: array-like of shape (n_samples), (n_samples, n_outputs) or (n_samples, n_features, n_outputs) with the target. Internally, they will be converted to torch tensors. Default=None. None is used for unlabeled datasets.\nsplits: lists of indices used to split data between train and validation. Default=None. If no splits are passed, data will be split 100:0 between train and test without shuffling.\ntfms: item transforms that will be applied to each sample individually. Default:None.\nbatch_tfms: transforms applied to each batch. Default=None.\npipelines: store sklearn-type pipelines that can then be applied to pandas dataframes with transform or inverse_transform methods. Default=None.\nbs: batch size (if batch_size is provided then batch_size will override bs). An int or a list of ints can be passed. Default=[64, 128]. If a list of ints, the first one will be used for training, and the second for the valid (batch size can be larger as it doesn’t require backpropagation which consumes more memory).\narch: indicates which architecture will be used. Alternatively, you can pass an instantiated model. Default: InceptionTimePlus.\narch_config: keyword arguments passed to the selected architecture. Default={}.\npretrained: indicates if pretrained model weights will be used. Default=False.\nweights_path: indicates the path to the pretrained weights in case they are used.\nloss_func: allows you to pass any loss function. Default=None (in which case CrossEntropyLossFlat() is applied).\nopt_func: allows you to pass an optimizer. Default=Adam.\nlr: learning rate. Default=0.001.\nmetrics: list of metrics passed to the Learner. Default=accuracy.\ncbs: list of callbacks passed to the Learner. Default=None.\nwd: is the default weight decay used when training the model. Default=None.\n\nLess frequently used arguments:\n\nsel_vars: used to select which of the features in multivariate datasets are used. 
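For instance, a minimal sketch (the dataset, feature indices and step slice are illustrative only, not defaults of the API):\n\nfrom tsai.all import *\n# NATOPS is multivariate (24 features, 51 steps); train on 3 selected features and the last 30 steps only\nX, y, splits = get_classification_data('NATOPS', split_data=False)\nlearn = TSClassifier(X, y, splits=splits, tfms=[None, TSClassification()],\n sel_vars=[0, 3, 5], sel_steps=slice(-30, None), metrics=accuracy)\nlearn.fit_one_cycle(1)\n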
Default=None means all features are used. If necessary a list-like of indices can be used (eg.[0,3,5]).\nsel_steps: used to select the steps used. Default=None means all steps are used. If necessary a list-like of indices can be used (eg. slice(-50, None) will select the last 50 steps from each time series).\ns_cat_idxs: list of indices for static categorical variables\ns_cat_embeddings: list of num_embeddings for each static categorical variable\ns_cat_embedding_dims: list of embedding dimensions for each static categorical variable\ns_cont_idxs: list of indices for static continuous variables\no_cat_idxs: list of indices for observed categorical variables\no_cat_embeddings: list of num_embeddings for each observed categorical variable\no_cat_embedding_dims: list of embedding dimensions for each observed categorical variable\no_cont_idxs: list of indices for observed continuous variables\npatch_len: Number of time steps in each patch.\npatch_stride: Stride of the patch.\nfusion_layers: list of layer dimensions for the fusion MLP\nfusion_act: activation function for the fusion MLP\nfusion_dropout: dropout probability for the fusion MLP\nfusion_use_bn: boolean indicating whether to use batch normalization in the fusion MLP\nweights: indicates a sample weight per instance. Used to pass pass a probability to the train dataloader sampler. Samples with more weight will be selected more often during training.\npartial_n: select randomly partial quantity of data at each epoch. Used to reduce the training size (for example for testing purposes). int or float can be used.\nvocab: vocabulary used to transform the target. Only required when transformed is not perform by a dataloader’s tfm (external transforms).\ntrain_metrics: flag used to display metrics in the training set. Defaults to False.\nvalid_metrics: flag used to display metrics in the validtion set. Defaults to True.\ninplace: indicates whether tfms are applied during instantiation or on-the-fly. Default=True, which means that tfms will be applied during instantiation. This results in a faster training, but it can only be used when data fits in memory. Otherwise set it to False.\nshuffle_train: indicates whether to shuffle the training set every time the dataloader is fully read/iterated or not. This doesn’t have an impact on the validation set which is never shuffled. Default=True.\ndrop_last: if True the last incomplete training batch is dropped (thus ensuring training batches of equal size). This doesn’t have an impact on the validation set where samples are never dropped. Default=True.\nnum_workers: num_workers (int): how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. Default=0.\ndo_setup: ndicates if the Pipeline.setup method should be called during initialization. Default=True.\ndevice: Defaults to default_device() which is CUDA by default. You can specify device as `torch.device(‘cpu’).\nseed: Set to an int to ensure reprodubibility. Default=None.\nverbose: controls the verbosity when fitting and predicting.\nexclude_head: indicates whether the head of the pretrained model needs to be removed or not. Default=True.\ncut: indicates the position where the pretrained model head needs to be cut. Defaults=-1.\ninit: allows you to set to None (no initialization applied), set to True (in which case nn.init.kaiming_normal_ will be applied) or pass an initialization. Default=None.\nsplitter: To do transfer learning, you need to pass a splitter to Learner. 
This should be a function taking the model and returning a collection of parameter groups, e.g. a list of list of parameters. Default=trainable_params. If the model has a backbone and a head, it will then be split in those 2 groups.\npath and model_dir: are used to save and/or load models. Often path will be inferred from dls, but you can override it or pass a Path object to model_dir.\nwd_bn_bias: controls if weight decay is applied to BatchNorm layers and bias. Default=False. train_bn=True\nmoms: the default momentums used in Learner.fit_one_cycle. Default=(0.95, 0.85, 0.95).\n\n\nsource\n\nTSClassifier\n\n TSClassifier (X, y=None, splits=None, tfms=None, inplace=True,\n sel_vars=None, sel_steps=None, s_cat_idxs=None,\n s_cat_embeddings=None, s_cat_embedding_dims=None,\n s_cont_idxs=None, o_cat_idxs=None, o_cat_embeddings=None,\n o_cat_embedding_dims=None, o_cont_idxs=None,\n patch_len=None, patch_stride=None, fusion_layers=128,\n fusion_act='relu', fusion_dropout=0.0, fusion_use_bn=True,\n weights=None, partial_n=None, vocab=None,\n train_metrics=False, valid_metrics=True, bs=[64, 128],\n batch_size=None, batch_tfms=None, pipelines=None,\n shuffle_train=True, drop_last=True, num_workers=0,\n do_setup=True, device=None, seed=None, arch=None,\n arch_config={}, pretrained=False, weights_path=None,\n exclude_head=True, cut=-1, init=None, loss_func=None,\n opt_func=<function Adam>, lr=0.001, metrics=<function\n accuracy>, cbs=None, wd=None, wd_bn_bias=False,\n train_bn=True, moms=(0.95, 0.85, 0.95), path='.',\n model_dir='models', splitter=<function trainable_params>,\n verbose=False)\n\nGroup together a model, some dls and a loss_func to handle training\n\nfrom tsai.data.external import *\nfrom tsai.data.preprocessing import *\nfrom tsai.models.InceptionTimePlus import *\n\n\n# With validation split\nX, y, splits = get_classification_data('OliveOil', split_data=False)\ntfms = [None, TSClassification()]\nbatch_tfms = [TSStandardize(by_sample=True)]\nlearn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, metrics=accuracy, arch=InceptionTimePlus, arch_config=dict(fc_dropout=.5),\n train_metrics=True)\nlearn.fit_one_cycle(1)\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\ntrain_accuracy\nvalid_loss\nvalid_accuracy\ntime\n\n\n\n\n0\n1.446255\n0.266667\n1.403359\n0.300000\n00:00\n\n\n\n\n\n\n# Without validation split\nX, y, splits = get_classification_data('OliveOil', split_data=False)\nsplits = (splits[0], None)\ntfms = [None, TSClassification()]\nbatch_tfms = [TSStandardize(by_sample=True)]\nlearn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, metrics=accuracy, arch=InceptionTimePlus, arch_config=dict(fc_dropout=.5),\n train_metrics=True)\nlearn.fit_one_cycle(1)\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\naccuracy\ntime\n\n\n\n\n0\n1.286023\n0.400000\n00:00\n\n\n\n\n\n\nnum_classes = 5\nX = torch.rand(8, 2, 50)\ny = torch.randint(0, num_classes, (len(X), 3, 50))\nsplits = TimeSplitter(show_plot=False)(y)\nvocab = np.arange(num_classes)\n\nfail_test = []\nfor arch in all_arch_names:\n if not \"plus\" in arch.lower(): continue\n try:\n learn = TSClassifier(X, y, splits=splits, arch=arch, metrics=accuracy, vocab=vocab, device=default_device())\n with ContextManagers([learn.no_bar(), learn.no_logging()]):\n learn.fit_one_cycle(1, 1e-3)\n del learn\n gc.collect()\n except Exception as e:\n fail_test.append(arch)\n print(arch, e)\n\ntest_eq(fail_test, [])", + "crumbs": [ + "Training", + "TSLearner" + ] + }, + { + "objectID": "tslearner.html#tsregressor-api", + "href": 
"tslearner.html#tsregressor-api", + "title": "TSLearner", + "section": "TSRegressor API", + "text": "TSRegressor API\n\nCommonly used arguments:\n\nX: array-like of shape (n_samples, n_steps) or (n_samples, n_features, n_steps) with the input time series samples. Internally, they will be converted to torch tensors.\ny: array-like of shape (n_samples), (n_samples, n_outputs) or (n_samples, n_features, n_outputs) with the target. Internally, they will be converted to torch tensors. Default=None. None is used for unlabeled datasets.\nsplits: lists of indices used to split data between train and validation. Default=None. If no splits are passed, data will be split 100:0 between train and test without shuffling.\ntfms: item transforms that will be applied to each sample individually. Default=None.\nbatch_tfms: transforms applied to each batch. Default=None.\npipelines: store sklearn-type pipelines that can then be applied to pandas dataframes with transform or inverse_transform methods. Default=None.\nbs: batch size (if batch_size is provided then batch_size will override bs). An int or a list of ints can be passed. Default=[64, 128]. If a list of ints, the first one will be used for training, and the second for the valid (batch size can be larger as it doesn’t require backpropagation which consumes more memory).\narch: indicates which architecture will be used. Alternatively, you can pass an instantiated model. Default: InceptionTimePlus.\narch_config: keyword arguments passed to the selected architecture. Default={}.\npretrained: indicates if pretrained model weights will be used. Default=False.\nweights_path: indicates the path to the pretrained weights in case they are used.\nloss_func: allows you to pass any loss function. Default=None (in which case CrossEntropyLossFlat() is applied).\nopt_func: allows you to pass an optimizer. Default=Adam.\nlr: learning rate. Default=0.001.\nmetrics: list of metrics passed to the Learner. Default=None.\ncbs: list of callbacks passed to the Learner. Default=None.\nwd: is the default weight decay used when training the model. Default=None.\n\nLess frequently used arguments:\n\nsel_vars: used to select which of the features in multivariate datasets are used. Default=None means all features are used. If necessary a list-like of indices can be used (eg.[0,3,5]).\nsel_steps: used to select the steps used. Default=None means all steps are used. If necessary a list-like of indices can be used (eg. slice(-50, None) will select the last 50 steps from each time series).\ns_cat_idxs: list of indices for static categorical variables\ns_cat_embeddings: list of num_embeddings for each static categorical variable\ns_cat_embedding_dims: list of embedding dimensions for each static categorical variable\ns_cont_idxs: list of indices for static continuous variables\no_cat_idxs: list of indices for observed categorical variables\no_cat_embeddings: list of num_embeddings for each observed categorical variable\no_cat_embedding_dims: list of embedding dimensions for each observed categorical variable\no_cont_idxs: list of indices for observed continuous variables\npatch_len: Number of time steps in each patch.\npatch_stride: Stride of the patch.\nfusion_layers: list of layer dimensions for the fusion MLP\nfusion_act: activation function for the fusion MLP\nfusion_dropout: dropout probability for the fusion MLP\nfusion_use_bn: boolean indicating whether to use batch normalization in the fusion MLP\nweights: indicates a sample weight per instance. 
Used to pass pass a probability to the train dataloader sampler. Samples with more weight will be selected more often during training.\npartial_n: select randomly partial quantity of data at each epoch. Used to reduce the training size (for example for testing purposes). int or float can be used.\ntrain_metrics: flag used to display metrics in the training set. Defaults to False.\nvalid_metrics: flag used to display metrics in the validtion set. Defaults to True.\ninplace: indicates whether tfms are applied during instantiation or on-the-fly. Default=True, which means that tfms will be applied during instantiation. This results in a faster training, but it can only be used when data fits in memory. Otherwise set it to False.\nshuffle_train: indicates whether to shuffle the training set every time the dataloader is fully read/iterated or not. This doesn’t have an impact on the validation set which is never shuffled. Default=True.\ndrop_last: if True the last incomplete training batch is dropped (thus ensuring training batches of equal size). This doesn’t have an impact on the validation set where samples are never dropped. Default=True.\nnum_workers: num_workers (int): how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. Default=0.\ndo_setup: ndicates if the Pipeline.setup method should be called during initialization. Default=True.\ndevice: Defaults to default_device() which is CUDA by default. You can specify device as `torch.device(‘cpu’).\nseed: Set to an int to ensure reprodubibility. Default=None.\nverbose: controls the verbosity when fitting and predicting.\nexclude_head: indicates whether the head of the pretrained model needs to be removed or not. Default=True.\ncut: indicates the position where the pretrained model head needs to be cut. Defaults=-1.\ninit: allows you to set to None (no initialization applied), set to True (in which case nn.init.kaiming_normal_ will be applied) or pass an initialization. Default=None.\nsplitter: To do transfer learning, you need to pass a splitter to Learner. This should be a function taking the model and returning a collection of parameter groups, e.g. a list of list of parameters. Default=trainable_params. If the model has a backbone and a head, it will then be split in those 2 groups.\npath and model_dir: are used to save and/or load models. Often path will be inferred from dls, but you can override it or pass a Path object to model_dir.\nwd_bn_bias: controls if weight decay is applied to BatchNorm layers and bias. Default=False. train_bn=True\nmoms: the default momentums used in Learner.fit_one_cycle. 
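They can also be overridden for a single fit, e.g. (a sketch, values chosen only for illustration):\n\nlearn = TSRegressor(X, y, splits=splits, metrics=mae, moms=(0.95, 0.85, 0.95))\nlearn.fit_one_cycle(5, lr_max=1e-3, moms=(0.9, 0.8, 0.9)) # per-fit override of the learner default\n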
Default=(0.95, 0.85, 0.95).\n\n\nsource\n\nTSRegressor\n\n TSRegressor (X, y=None, splits=None, tfms=None, inplace=True,\n sel_vars=None, sel_steps=None, s_cat_idxs=None,\n s_cat_embeddings=None, s_cat_embedding_dims=None,\n s_cont_idxs=None, o_cat_idxs=None, o_cat_embeddings=None,\n o_cat_embedding_dims=None, o_cont_idxs=None, patch_len=None,\n patch_stride=None, fusion_layers=128, fusion_act='relu',\n fusion_dropout=0.0, fusion_use_bn=True, weights=None,\n partial_n=None, train_metrics=False, valid_metrics=True,\n bs=[64, 128], batch_size=None, batch_tfms=None,\n pipelines=None, shuffle_train=True, drop_last=True,\n num_workers=0, do_setup=True, device=None, seed=None,\n arch=None, arch_config={}, pretrained=False,\n weights_path=None, exclude_head=True, cut=-1, init=None,\n loss_func=None, opt_func=<function Adam>, lr=0.001,\n metrics=None, cbs=None, wd=None, wd_bn_bias=False,\n train_bn=True, moms=(0.95, 0.85, 0.95), path='.',\n model_dir='models', splitter=<function trainable_params>,\n verbose=False)\n\nGroup together a model, some dls and a loss_func to handle training\n\nX, y, splits = get_regression_data('AppliancesEnergy', split_data=False)\nif X is not None: # This is to prevent a test fail when the data server is not available\n X = X.astype('float32')\n y = y.astype('float32')\n batch_tfms = [TSStandardize()]\n learn = TSRegressor(X, y, splits=splits, batch_tfms=batch_tfms, arch=None, metrics=mae, bs=512, train_metrics=True, device=default_device())\n learn.fit_one_cycle(1, 1e-4)\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\ntrain_mae\nvalid_loss\nvalid_mae\ntime\n\n\n\n\n0\n221.239578\n14.241582\n208.787231\n14.034328\n00:00", + "crumbs": [ + "Training", + "TSLearner" + ] + }, + { + "objectID": "tslearner.html#tsforecaster-api", + "href": "tslearner.html#tsforecaster-api", + "title": "TSLearner", + "section": "TSForecaster API", + "text": "TSForecaster API\n\nCommonly used arguments:\n\nX: array-like of shape (n_samples, n_steps) or (n_samples, n_features, n_steps) with the input time series samples. Internally, they will be converted to torch tensors.\ny: array-like of shape (n_samples), (n_samples, n_outputs) or (n_samples, n_features, n_outputs) with the target. Internally, they will be converted to torch tensors. Default=None. None is used for unlabeled datasets.\nsplits: lists of indices used to split data between train and validation. Default=None. If no splits are passed, data will be split 100:0 between train and test without shuffling.\ntfms: item transforms that will be applied to each sample individually. Default=None.\nbatch_tfms: transforms applied to each batch. Default=None.\npipelines: store sklearn-type pipelines that can then be applied to pandas dataframes with transform or inverse_transform methods. Default=None.\nbs: batch size (if batch_size is provided then batch_size will override bs). An int or a list of ints can be passed. Default=[64, 128]. If a list of ints, the first one will be used for training, and the second for the valid (batch size can be larger as it doesn’t require backpropagation which consumes more memory).\narch: indicates which architecture will be used. Alternatively, you can pass an instantiated model. Default: InceptionTimePlus.\narch_config: keyword arguments passed to the selected architecture. Default={}.\npretrained: indicates if pretrained model weights will be used. Default=False.\nweights_path: indicates the path to the pretrained weights in case they are used.\nloss_func: allows you to pass any loss function. 
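For example, a sketch using a regression-style loss (X, y and splits as in the Sunspots example below; any loss with a fastai flattened signature should work):\n\nlearn = TSForecaster(X, y, splits=splits, batch_tfms=[TSStandardize(by_var=True)],\n loss_func=MSELossFlat(), metrics=mae)\nlearn.fit_one_cycle(1)\n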
Default=None (in which case CrossEntropyLossFlat() is applied).\nopt_func: allows you to pass an optimizer. Default=Adam.\nlr: learning rate. Default=0.001.\nmetrics: list of metrics passed to the Learner. Default=None.\ncbs: list of callbacks passed to the Learner. Default=None.\nwd: is the default weight decay used when training the model. Default=None.\n\nLess frequently used arguments:\n\nsel_vars: used to select which of the features in multivariate datasets are used. Default=None means all features are used. If necessary a list-like of indices can be used (eg.[0,3,5]).\nsel_steps: used to select the steps used. Default=None means all steps are used. If necessary a list-like of indices can be used (eg. slice(-50, None) will select the last 50 steps from each time series).\ns_cat_idxs: list of indices for static categorical variables\ns_cat_embeddings: list of num_embeddings for each static categorical variable\ns_cat_embedding_dims: list of embedding dimensions for each static categorical variable\ns_cont_idxs: list of indices for static continuous variables\no_cat_idxs: list of indices for observed categorical variables\no_cat_embeddings: list of num_embeddings for each observed categorical variable\no_cat_embedding_dims: list of embedding dimensions for each observed categorical variable\no_cont_idxs: list of indices for observed continuous variables\npatch_len: Number of time steps in each patch.\npatch_stride: Stride of the patch.\nfusion_layers: list of layer dimensions for the fusion MLP\nfusion_act: activation function for the fusion MLP\nfusion_dropout: dropout probability for the fusion MLP\nfusion_use_bn: boolean indicating whether to use batch normalization in the fusion MLP\nweights: indicates a sample weight per instance. Used to pass pass a probability to the train dataloader sampler. Samples with more weight will be selected more often during training.\npartial_n: select randomly partial quantity of data at each epoch. Used to reduce the training size (for example for testing purposes). int or float can be used.\ntrain_metrics: flag used to display metrics in the training set. Defaults to False.\nvalid_metrics: flag used to display metrics in the validtion set. Defaults to True.\ninplace: indicates whether tfms are applied during instantiation or on-the-fly. Default=True, which means that tfms will be applied during instantiation. This results in a faster training, but it can only be used when data fits in memory. Otherwise set it to False.\nshuffle_train: indicates whether to shuffle the training set every time the dataloader is fully read/iterated or not. This doesn’t have an impact on the validation set which is never shuffled. Default=True.\ndrop_last: if True the last incomplete training batch is dropped (thus ensuring training batches of equal size). This doesn’t have an impact on the validation set where samples are never dropped. Default=True.\nnum_workers: num_workers (int): how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. Default=None.\ndo_setup: ndicates if the Pipeline.setup method should be called during initialization. Default=True.\ndevice: Defaults to default_device() which is CUDA by default. You can specify device as `torch.device(‘cpu’).\nseed: Set to an int to ensure reprodubibility. Default=None.\nverbose: controls the verbosity when fitting and predicting.\nexclude_head: indicates whether the head of the pretrained model needs to be removed or not. 
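A sketch of how these arguments combine when loading pretrained weights (the weights file path is hypothetical):\n\nlearn = TSForecaster(X, y, splits=splits, arch=InceptionTimePlus, pretrained=True,\n weights_path='models/my_pretrained_backbone.pth', # hypothetical path\n exclude_head=True, metrics=mae)\n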
Default=True.\ncut: indicates the position where the pretrained model head needs to be cut. Defaults=-1.\ninit: allows you to set to None (no initialization applied), set to True (in which case nn.init.kaiming_normal_ will be applied) or pass an initialization. Default=None.\nsplitter: To do transfer learning, you need to pass a splitter to Learner. This should be a function taking the model and returning a collection of parameter groups, e.g. a list of list of parameters. Default=trainable_params. If the model has a backbone and a head, it will then be split in those 2 groups.\npath and model_dir: are used to save and/or load models. Often path will be inferred from dls, but you can override it or pass a Path object to model_dir.\nwd_bn_bias: controls if weight decay is applied to BatchNorm layers and bias. Default=False. train_bn=True\nmoms: the default momentums used in Learner.fit_one_cycle. Default=(0.95, 0.85, 0.95).\n\n\nsource\n\nTSForecaster\n\n TSForecaster (X, y=None, splits=None, tfms=None, inplace=True,\n sel_vars=None, sel_steps=None, s_cat_idxs=None,\n s_cat_embeddings=None, s_cat_embedding_dims=None,\n s_cont_idxs=None, o_cat_idxs=None, o_cat_embeddings=None,\n o_cat_embedding_dims=None, o_cont_idxs=None,\n patch_len=None, patch_stride=None, fusion_layers=128,\n fusion_act='relu', fusion_dropout=0.0, fusion_use_bn=True,\n weights=None, partial_n=None, train_metrics=False,\n valid_metrics=True, bs=[64, 128], batch_size=None,\n batch_tfms=None, pipelines=None, shuffle_train=True,\n drop_last=True, num_workers=0, do_setup=True, device=None,\n seed=None, arch=None, arch_config={}, pretrained=False,\n weights_path=None, exclude_head=True, cut=-1, init=None,\n loss_func=None, opt_func=<function Adam>, lr=0.001,\n metrics=None, cbs=None, wd=None, wd_bn_bias=False,\n train_bn=True, moms=(0.95, 0.85, 0.95), path='.',\n model_dir='models', splitter=<function trainable_params>,\n verbose=False)\n\nGroup together a model, some dls and a loss_func to handle training\n\nfrom tsai.data.preparation import *\n\n\nts = get_forecasting_time_series('Sunspots')\nif ts is not None: # This is to prevent a test fail when the data server is not available\n X, y = SlidingWindowSplitter(60, horizon=1)(ts)\n X, y = X.astype('float32'), y.astype('float32')\n splits = TSSplitter(235)(y)\n batch_tfms = [TSStandardize(by_var=True)]\n learn = TSForecaster(X, y, splits=splits, batch_tfms=batch_tfms, arch=None, arch_config=dict(fc_dropout=.5), metrics=mae, bs=512,\n partial_n=.1, train_metrics=True, device=default_device())\n learn.fit_one_cycle(1)\n\nDataset: Sunspots\ndownloading data...\n...done. 
Path = data/forecasting/Sunspots.csv\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\ntrain_mae\nvalid_loss\nvalid_mae\ntime\n\n\n\n\n0\n4616.225098\n53.340523\n7969.317871\n74.670258\n00:00\n\n\n\n\n\n\nX=torch.rand(8,2,50)\ny=torch.rand(8,1)\nsplits = TimeSplitter(show_plot=False)(y)\n\nfail_test = []\nfor arch in all_arch_names:\n if not \"plus\" in arch.lower(): continue\n try:\n fcst = TSForecaster(X, y, splits=splits, arch=arch, metrics=mse, device=default_device())\n with ContextManagers([fcst.no_bar(), fcst.no_logging()]):\n fcst.fit_one_cycle(1, 1e-3)\n except Exception as e:\n fail_test.append(arch)\n print(arch, e)\n\ntest_eq(fail_test, [])", + "crumbs": [ + "Training", + "TSLearner" + ] + }, + { + "objectID": "models.rocket.html", + "href": "models.rocket.html", + "title": "ROCKET", + "section": "", + "text": "ROCKET (RandOm Convolutional KErnel Transform) functions for univariate and multivariate time series.\n\n\nsource\n\nRocketClassifier\n\n RocketClassifier (num_kernels=10000, normalize_input=True,\n random_state=None, alphas=array([1.e-03, 1.e-02,\n 1.e-01, 1.e+00, 1.e+01, 1.e+02, 1.e+03]),\n normalize_features=True, memory=None, verbose=False,\n scoring=None, class_weight=None, **kwargs)\n\nTime series classification using ROCKET features and a linear classifier\n\nsource\n\n\nload_rocket\n\n load_rocket (fname='Rocket', path='./models')\n\n\nsource\n\n\nRocketRegressor\n\n RocketRegressor (num_kernels=10000, normalize_input=True,\n random_state=None, alphas=array([1.e-03, 1.e-02, 1.e-01,\n 1.e+00, 1.e+01, 1.e+02, 1.e+03]),\n normalize_features=True, memory=None, verbose=False,\n scoring=None, **kwargs)\n\nTime series regression using ROCKET features and a linear regressor\n\n# Univariate classification with sklearn-type API\ndsid = 'OliveOil'\nfname = 'RocketClassifier'\nX_train, y_train, X_test, y_test = get_UCR_data(dsid, Xdtype='float64')\ncls = RocketClassifier()\ncls.fit(X_train, y_train)\ncls.save(fname)\ndel cls\ncls = load_rocket(fname)\nprint(cls.score(X_test, y_test))\n\nOMP: Info #276: omp_set_nested routine deprecated, please use omp_set_max_active_levels instead.\n\n\n0.9\n\n\n\n# Multivariate classification with sklearn-type API\ndsid = 'NATOPS'\nfname = 'RocketClassifier'\nX_train, y_train, X_test, y_test = get_UCR_data(dsid, Xdtype='float64')\ncls = RocketClassifier()\ncls.fit(X_train, y_train)\ncls.save(fname)\ndel cls\ncls = load_rocket(fname)\nprint(cls.score(X_test, y_test))\n\n0.8666666666666667\n\n\n\nfrom sklearn.metrics import mean_squared_error\n\n\n# Univariate regression with sklearn-type API\ndsid = 'Covid3Month'\nfname = 'RocketRegressor'\nX_train, y_train, X_test, y_test = get_Monash_regression_data(dsid, Xdtype='float64')\nif X_train is not None: \n rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)\n reg = RocketRegressor(scoring=rmse_scorer)\n reg.fit(X_train, y_train)\n reg.save(fname)\n del reg\n reg = load_rocket(fname)\n y_pred = reg.predict(X_test)\n print(mean_squared_error(y_test, y_pred, squared=False))\n\n0.03908714523468997\n\n\n\n# Multivariate regression with sklearn-type API\ndsid = 'AppliancesEnergy'\nfname = 'RocketRegressor'\nX_train, y_train, X_test, y_test = get_Monash_regression_data(dsid, Xdtype='float64')\nif X_train is not None: \n rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)\n reg = RocketRegressor(scoring=rmse_scorer)\n reg.fit(X_train, y_train)\n reg.save(fname)\n del reg\n reg = load_rocket(fname)\n y_pred = reg.predict(X_test)\n print(mean_squared_error(y_test, 
y_pred, squared=False))\n\n2.287302226812576", + "crumbs": [ + "Models", + "ROCKETs", + "ROCKET" + ] + }, + { + "objectID": "models.xceptiontime.html", + "href": "models.xceptiontime.html", + "title": "XceptionTime", + "section": "", + "text": "This is an unofficial PyTorch implementation by Ignacio Oguiza - oguiza@timeseriesAI.co modified on:\nFawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J. & Petitjean, F. (2019). InceptionTime: Finding AlexNet for Time Series Classification. arXiv preprint arXiv:1909.04939.\nOfficial InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime\n\nsource\n\nXceptionTime\n\n XceptionTime (c_in, c_out, nf=16, nb_filters=None, adaptive_size=50,\n residual=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nXceptionBlock\n\n XceptionBlock (ni, nf, residual=True, ks=40, bottleneck=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nXceptionModule\n\n XceptionModule (ni, nf, ks=40, bottleneck=True)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 16\nvars = 3\nseq_len = 12\nc_out = 6\nxb = torch.rand(bs, vars, seq_len)\ntest_eq(XceptionTime(vars,c_out)(xb).shape, [bs, c_out])\ntest_eq(XceptionTime(vars,c_out, bottleneck=False)(xb).shape, [bs, c_out])\ntest_eq(XceptionTime(vars,c_out, residual=False)(xb).shape, [bs, c_out])\ntest_eq(count_parameters(XceptionTime(3, 2)), 399540)\n\n\nm = XceptionTime(2,3)\ntest_eq(check_weight(m, is_bn)[0].sum(), 5) # 2 shortcut + 3 bn\ntest_eq(len(check_bias(m, is_conv)[0]), 0)\ntest_eq(len(check_bias(m)[0]), 5) # 2 shortcut + 3 bn\n\n\nXceptionTime(3, 2)\n\nXceptionTime(\n (block): XceptionBlock(\n (xception): ModuleList(\n (0): XceptionModule(\n (bottleneck): Conv1d(3, 16, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): SeparableConv1d(\n (depthwise_conv): Conv1d(16, 16, kernel_size=(39,), stride=(1,), padding=(19,), groups=16, bias=False)\n (pointwise_conv): Conv1d(16, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(16, 16, kernel_size=(19,), stride=(1,), padding=(9,), groups=16, bias=False)\n (pointwise_conv): Conv1d(16, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): SeparableConv1d(\n (depthwise_conv): Conv1d(16, 16, kernel_size=(9,), stride=(1,), padding=(4,), groups=16, bias=False)\n (pointwise_conv): Conv1d(16, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(3, 16, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n )\n (1): XceptionModule(\n (bottleneck): Conv1d(64, 32, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): SeparableConv1d(\n (depthwise_conv): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), groups=32, bias=False)\n (pointwise_conv): Conv1d(32, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), groups=32, bias=False)\n (pointwise_conv): Conv1d(32, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): SeparableConv1d(\n (depthwise_conv): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), groups=32, bias=False)\n (pointwise_conv): Conv1d(32, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (maxconvpool): Sequential(\n (0): 
MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(64, 32, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n )\n (2): XceptionModule(\n (bottleneck): Conv1d(128, 64, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): SeparableConv1d(\n (depthwise_conv): Conv1d(64, 64, kernel_size=(39,), stride=(1,), padding=(19,), groups=64, bias=False)\n (pointwise_conv): Conv1d(64, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(64, 64, kernel_size=(19,), stride=(1,), padding=(9,), groups=64, bias=False)\n (pointwise_conv): Conv1d(64, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): SeparableConv1d(\n (depthwise_conv): Conv1d(64, 64, kernel_size=(9,), stride=(1,), padding=(4,), groups=64, bias=False)\n (pointwise_conv): Conv1d(64, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(128, 64, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n )\n (3): XceptionModule(\n (bottleneck): Conv1d(256, 128, kernel_size=(1,), stride=(1,), bias=False)\n (convs): ModuleList(\n (0): SeparableConv1d(\n (depthwise_conv): Conv1d(128, 128, kernel_size=(39,), stride=(1,), padding=(19,), groups=128, bias=False)\n (pointwise_conv): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (1): SeparableConv1d(\n (depthwise_conv): Conv1d(128, 128, kernel_size=(19,), stride=(1,), padding=(9,), groups=128, bias=False)\n (pointwise_conv): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (2): SeparableConv1d(\n (depthwise_conv): Conv1d(128, 128, kernel_size=(9,), stride=(1,), padding=(4,), groups=128, bias=False)\n (pointwise_conv): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n )\n (maxconvpool): Sequential(\n (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)\n (1): Conv1d(256, 128, kernel_size=(1,), stride=(1,), bias=False)\n )\n (concat): Concat(dim=1)\n )\n )\n (shortcut): ModuleList(\n (0): ConvBlock(\n (0): Conv1d(3, 128, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (1): ConvBlock(\n (0): Conv1d(128, 512, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (add): Add\n (act): ReLU()\n )\n (head): Sequential(\n (0): AdaptiveAvgPool1d(output_size=50)\n (1): ConvBlock(\n (0): Conv1d(512, 256, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (2): ConvBlock(\n (0): Conv1d(256, 128, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (3): ConvBlock(\n (0): Conv1d(128, 2, kernel_size=(1,), stride=(1,), bias=False)\n (1): BatchNorm1d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU()\n )\n (4): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Flatten(full=False)\n )\n )\n)", + "crumbs": [ + "Models", + "CNNs", + "XceptionTime" + ] + }, + { + "objectID": "models.tcn.html", + "href": "models.tcn.html", + "title": "TCN", + "section": "", + "text": "This is an unofficial PyTorch implementation by Ignacio Oguiza (oguiza@timeseriesAI.co) based 
on:\n\nBai, S., Kolter, J. Z., & Koltun, V. (2018). An empirical evaluation of generic convolutional and recurrent networks for sequence modeling. arXiv preprint arXiv:1803.01271.\nOfficial TCN PyTorch implementation: https://github.com/locuslab/TCN\n\n\nsource\n\nTCN\n\n TCN (c_in, c_out, layers=[25, 25, 25, 25, 25, 25, 25, 25], ks=7,\n conv_dropout=0.0, fc_dropout=0.0)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nTemporalConvNet\n\n TemporalConvNet (c_in, layers, ks=2, dropout=0.0)\n\n\nsource\n\n\nTemporalBlock\n\n TemporalBlock (ni, nf, ks, stride, dilation, padding, dropout=0.0)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nbs = 16\nnvars = 3\nseq_len = 128\nc_out = 2\nxb = torch.rand(bs, nvars, seq_len)\nmodel = TCN(nvars, c_out, fc_dropout=.5)\ntest_eq(model(xb).shape, (bs, c_out))\nmodel = TCN(nvars, c_out, conv_dropout=.2)\ntest_eq(model(xb).shape, (bs, c_out))\nmodel = TCN(nvars, c_out)\ntest_eq(model(xb).shape, (bs, c_out))\nmodel\n\nTCN(\n (tcn): Sequential(\n (0): TemporalBlock(\n (conv1): Conv1d(3, 25, kernel_size=(7,), stride=(1,), padding=(6,))\n (chomp1): Chomp1d()\n (relu1): ReLU()\n (dropout1): Dropout(p=0.0, inplace=False)\n (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(6,))\n (chomp2): Chomp1d()\n (relu2): ReLU()\n (dropout2): Dropout(p=0.0, inplace=False)\n (net): Sequential(\n (0): Conv1d(3, 25, kernel_size=(7,), stride=(1,), padding=(6,))\n (1): Chomp1d()\n (2): ReLU()\n (3): Dropout(p=0.0, inplace=False)\n (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(6,))\n (5): Chomp1d()\n (6): ReLU()\n (7): Dropout(p=0.0, inplace=False)\n )\n (downsample): Conv1d(3, 25, kernel_size=(1,), stride=(1,))\n (relu): ReLU()\n )\n (1): TemporalBlock(\n (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(12,), dilation=(2,))\n (chomp1): Chomp1d()\n (relu1): ReLU()\n (dropout1): Dropout(p=0.0, inplace=False)\n (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(12,), dilation=(2,))\n (chomp2): Chomp1d()\n (relu2): ReLU()\n (dropout2): Dropout(p=0.0, inplace=False)\n (net): Sequential(\n (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(12,), dilation=(2,))\n (1): Chomp1d()\n (2): ReLU()\n (3): Dropout(p=0.0, inplace=False)\n (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(12,), dilation=(2,))\n (5): Chomp1d()\n (6): ReLU()\n (7): Dropout(p=0.0, inplace=False)\n )\n (relu): ReLU()\n )\n (2): TemporalBlock(\n (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(24,), dilation=(4,))\n (chomp1): Chomp1d()\n (relu1): ReLU()\n (dropout1): Dropout(p=0.0, inplace=False)\n (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(24,), dilation=(4,))\n (chomp2): Chomp1d()\n (relu2): ReLU()\n (dropout2): Dropout(p=0.0, inplace=False)\n (net): Sequential(\n (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(24,), dilation=(4,))\n (1): Chomp1d()\n (2): ReLU()\n (3): Dropout(p=0.0, inplace=False)\n (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(24,), dilation=(4,))\n (5): Chomp1d()\n (6): ReLU()\n (7): Dropout(p=0.0, inplace=False)\n )\n (relu): ReLU()\n )\n (3): TemporalBlock(\n (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(48,), dilation=(8,))\n (chomp1): Chomp1d()\n (relu1): ReLU()\n (dropout1): Dropout(p=0.0, inplace=False)\n (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(48,), dilation=(8,))\n (chomp2): Chomp1d()\n (relu2): ReLU()\n (dropout2): Dropout(p=0.0, 
inplace=False)\n (net): Sequential(\n (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(48,), dilation=(8,))\n (1): Chomp1d()\n (2): ReLU()\n (3): Dropout(p=0.0, inplace=False)\n (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(48,), dilation=(8,))\n (5): Chomp1d()\n (6): ReLU()\n (7): Dropout(p=0.0, inplace=False)\n )\n (relu): ReLU()\n )\n (4): TemporalBlock(\n (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(96,), dilation=(16,))\n (chomp1): Chomp1d()\n (relu1): ReLU()\n (dropout1): Dropout(p=0.0, inplace=False)\n (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(96,), dilation=(16,))\n (chomp2): Chomp1d()\n (relu2): ReLU()\n (dropout2): Dropout(p=0.0, inplace=False)\n (net): Sequential(\n (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(96,), dilation=(16,))\n (1): Chomp1d()\n (2): ReLU()\n (3): Dropout(p=0.0, inplace=False)\n (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(96,), dilation=(16,))\n (5): Chomp1d()\n (6): ReLU()\n (7): Dropout(p=0.0, inplace=False)\n )\n (relu): ReLU()\n )\n (5): TemporalBlock(\n (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(192,), dilation=(32,))\n (chomp1): Chomp1d()\n (relu1): ReLU()\n (dropout1): Dropout(p=0.0, inplace=False)\n (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(192,), dilation=(32,))\n (chomp2): Chomp1d()\n (relu2): ReLU()\n (dropout2): Dropout(p=0.0, inplace=False)\n (net): Sequential(\n (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(192,), dilation=(32,))\n (1): Chomp1d()\n (2): ReLU()\n (3): Dropout(p=0.0, inplace=False)\n (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(192,), dilation=(32,))\n (5): Chomp1d()\n (6): ReLU()\n (7): Dropout(p=0.0, inplace=False)\n )\n (relu): ReLU()\n )\n (6): TemporalBlock(\n (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(384,), dilation=(64,))\n (chomp1): Chomp1d()\n (relu1): ReLU()\n (dropout1): Dropout(p=0.0, inplace=False)\n (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(384,), dilation=(64,))\n (chomp2): Chomp1d()\n (relu2): ReLU()\n (dropout2): Dropout(p=0.0, inplace=False)\n (net): Sequential(\n (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(384,), dilation=(64,))\n (1): Chomp1d()\n (2): ReLU()\n (3): Dropout(p=0.0, inplace=False)\n (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(384,), dilation=(64,))\n (5): Chomp1d()\n (6): ReLU()\n (7): Dropout(p=0.0, inplace=False)\n )\n (relu): ReLU()\n )\n (7): TemporalBlock(\n (conv1): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(768,), dilation=(128,))\n (chomp1): Chomp1d()\n (relu1): ReLU()\n (dropout1): Dropout(p=0.0, inplace=False)\n (conv2): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(768,), dilation=(128,))\n (chomp2): Chomp1d()\n (relu2): ReLU()\n (dropout2): Dropout(p=0.0, inplace=False)\n (net): Sequential(\n (0): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(768,), dilation=(128,))\n (1): Chomp1d()\n (2): ReLU()\n (3): Dropout(p=0.0, inplace=False)\n (4): Conv1d(25, 25, kernel_size=(7,), stride=(1,), padding=(768,), dilation=(128,))\n (5): Chomp1d()\n (6): ReLU()\n (7): Dropout(p=0.0, inplace=False)\n )\n (relu): ReLU()\n )\n )\n (gap): GAP1d(\n (gap): AdaptiveAvgPool1d(output_size=1)\n (flatten): Flatten(full=False)\n )\n (linear): Linear(in_features=25, out_features=2, bias=True)\n)", + "crumbs": [ + "Models", + "CNNs", + "TCN" + ] + }, + { + "objectID": "data.unwindowed.html", + "href": "data.unwindowed.html", + "title": 
"Unwindowed datasets", + "section": "", + "text": "Functionality that will allow you to create a dataset that applies sliding windows to the input data on the fly. This heavily reduces the size of the input data files, as only the original unwindowed data needs to be stored.\n\nI’d like to thank both Thomas Capelle (https://github.com/tcapelle) and Xander Dunn (https://github.com/xanderdunn) for their contributions to make this code possible.\n\nsource\n\nTSUnwindowedDatasets\n\n TSUnwindowedDatasets (dataset, splits)\n\nBase class for lists with subsets\n\nsource\n\n\nTSUnwindowedDataset\n\n TSUnwindowedDataset (X=None, y=None, y_func=None, window_size=1,\n stride=1, drop_start=0, drop_end=0, seq_first=True,\n **kwargs)\n\nInitialize self. See help(type(self)) for accurate signature.\n\ndef y_func(y): return y.astype('float').mean(1)\n\nThis approach works with both univariate and multivariate data.\n\nUnivariate: we’ll use a simple array with 20 values, one with the seq_len first (X0), the other with seq_len second (X1).\nMultivariate: we’ll use 2 time series arrays, one with the seq_len first (X2), the other with seq_len second (X3). No sliding window has been applied to them yet.\n\n\n# Univariate\nX0 = np.arange(20).astype(float)\nX1 = np.arange(20).reshape(1, -1).astype(float)\nX0.shape, X0, X1.shape, X1\n\n((20,),\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.,\n 13., 14., 15., 16., 17., 18., 19.]),\n (1, 20),\n array([[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.,\n 13., 14., 15., 16., 17., 18., 19.]]))\n\n\n\n# Multivariate\nX2 = np.arange(20).reshape(-1,1)*np.array([1, 10, 100]).reshape(1,-1).astype(float)\nX3 = np.arange(20).reshape(1,-1)*np.array([1, 10, 100]).reshape(-1,1).astype(float)\nX2.shape, X3.shape, X2, X3\n\n((20, 3),\n (3, 20),\n array([[0.0e+00, 0.0e+00, 0.0e+00],\n [1.0e+00, 1.0e+01, 1.0e+02],\n [2.0e+00, 2.0e+01, 2.0e+02],\n [3.0e+00, 3.0e+01, 3.0e+02],\n [4.0e+00, 4.0e+01, 4.0e+02],\n [5.0e+00, 5.0e+01, 5.0e+02],\n [6.0e+00, 6.0e+01, 6.0e+02],\n [7.0e+00, 7.0e+01, 7.0e+02],\n [8.0e+00, 8.0e+01, 8.0e+02],\n [9.0e+00, 9.0e+01, 9.0e+02],\n [1.0e+01, 1.0e+02, 1.0e+03],\n [1.1e+01, 1.1e+02, 1.1e+03],\n [1.2e+01, 1.2e+02, 1.2e+03],\n [1.3e+01, 1.3e+02, 1.3e+03],\n [1.4e+01, 1.4e+02, 1.4e+03],\n [1.5e+01, 1.5e+02, 1.5e+03],\n [1.6e+01, 1.6e+02, 1.6e+03],\n [1.7e+01, 1.7e+02, 1.7e+03],\n [1.8e+01, 1.8e+02, 1.8e+03],\n [1.9e+01, 1.9e+02, 1.9e+03]]),\n array([[0.0e+00, 1.0e+00, 2.0e+00, 3.0e+00, 4.0e+00, 5.0e+00, 6.0e+00,\n 7.0e+00, 8.0e+00, 9.0e+00, 1.0e+01, 1.1e+01, 1.2e+01, 1.3e+01,\n 1.4e+01, 1.5e+01, 1.6e+01, 1.7e+01, 1.8e+01, 1.9e+01],\n [0.0e+00, 1.0e+01, 2.0e+01, 3.0e+01, 4.0e+01, 5.0e+01, 6.0e+01,\n 7.0e+01, 8.0e+01, 9.0e+01, 1.0e+02, 1.1e+02, 1.2e+02, 1.3e+02,\n 1.4e+02, 1.5e+02, 1.6e+02, 1.7e+02, 1.8e+02, 1.9e+02],\n [0.0e+00, 1.0e+02, 2.0e+02, 3.0e+02, 4.0e+02, 5.0e+02, 6.0e+02,\n 7.0e+02, 8.0e+02, 9.0e+02, 1.0e+03, 1.1e+03, 1.2e+03, 1.3e+03,\n 1.4e+03, 1.5e+03, 1.6e+03, 1.7e+03, 1.8e+03, 1.9e+03]]))\n\n\nNow, instead of applying SlidingWindow to create and save the time series that can be consumed by a time series model, we can use a dataset that creates the data on the fly. In this way we avoid the need to create and save large files. 
This approach is also useful when you want to test different sliding window sizes, as otherwise you would need to create files for every size you want to test.The dataset will create the samples correctly formatted and ready to be passed on to a time series architecture.\n\nwds0 = TSUnwindowedDataset(X0, window_size=5, stride=2, seq_first=True)[:][0]\nwds1 = TSUnwindowedDataset(X1, window_size=5, stride=2, seq_first=False)[:][0]\ntest_eq(wds0, wds1)\nwds0, wds0.data, wds1, wds1.data\n\n(TSTensor(samples:8, vars:1, len:5, device=cpu),\n tensor([[[ 0., 1., 2., 3., 4.]],\n \n [[ 2., 3., 4., 5., 6.]],\n \n [[ 4., 5., 6., 7., 8.]],\n \n [[ 6., 7., 8., 9., 10.]],\n \n [[ 8., 9., 10., 11., 12.]],\n \n [[10., 11., 12., 13., 14.]],\n \n [[12., 13., 14., 15., 16.]],\n \n [[14., 15., 16., 17., 18.]]]),\n TSTensor(samples:8, vars:1, len:5, device=cpu),\n tensor([[[ 0., 1., 2., 3., 4.]],\n \n [[ 2., 3., 4., 5., 6.]],\n \n [[ 4., 5., 6., 7., 8.]],\n \n [[ 6., 7., 8., 9., 10.]],\n \n [[ 8., 9., 10., 11., 12.]],\n \n [[10., 11., 12., 13., 14.]],\n \n [[12., 13., 14., 15., 16.]],\n \n [[14., 15., 16., 17., 18.]]]))\n\n\n\nwds2 = TSUnwindowedDataset(X2, window_size=5, stride=2, seq_first=True)[:][0]\nwds3 = TSUnwindowedDataset(X3, window_size=5, stride=2, seq_first=False)[:][0]\ntest_eq(wds2, wds3)\nwds2, wds3, wds2.data, wds3.data\n\n(TSTensor(samples:8, vars:3, len:5, device=cpu),\n TSTensor(samples:8, vars:3, len:5, device=cpu),\n tensor([[[0.0000e+00, 1.0000e+00, 2.0000e+00, 3.0000e+00, 4.0000e+00],\n [0.0000e+00, 1.0000e+01, 2.0000e+01, 3.0000e+01, 4.0000e+01],\n [0.0000e+00, 1.0000e+02, 2.0000e+02, 3.0000e+02, 4.0000e+02]],\n \n [[2.0000e+00, 3.0000e+00, 4.0000e+00, 5.0000e+00, 6.0000e+00],\n [2.0000e+01, 3.0000e+01, 4.0000e+01, 5.0000e+01, 6.0000e+01],\n [2.0000e+02, 3.0000e+02, 4.0000e+02, 5.0000e+02, 6.0000e+02]],\n \n [[4.0000e+00, 5.0000e+00, 6.0000e+00, 7.0000e+00, 8.0000e+00],\n [4.0000e+01, 5.0000e+01, 6.0000e+01, 7.0000e+01, 8.0000e+01],\n [4.0000e+02, 5.0000e+02, 6.0000e+02, 7.0000e+02, 8.0000e+02]],\n \n [[6.0000e+00, 7.0000e+00, 8.0000e+00, 9.0000e+00, 1.0000e+01],\n [6.0000e+01, 7.0000e+01, 8.0000e+01, 9.0000e+01, 1.0000e+02],\n [6.0000e+02, 7.0000e+02, 8.0000e+02, 9.0000e+02, 1.0000e+03]],\n \n [[8.0000e+00, 9.0000e+00, 1.0000e+01, 1.1000e+01, 1.2000e+01],\n [8.0000e+01, 9.0000e+01, 1.0000e+02, 1.1000e+02, 1.2000e+02],\n [8.0000e+02, 9.0000e+02, 1.0000e+03, 1.1000e+03, 1.2000e+03]],\n \n [[1.0000e+01, 1.1000e+01, 1.2000e+01, 1.3000e+01, 1.4000e+01],\n [1.0000e+02, 1.1000e+02, 1.2000e+02, 1.3000e+02, 1.4000e+02],\n [1.0000e+03, 1.1000e+03, 1.2000e+03, 1.3000e+03, 1.4000e+03]],\n \n [[1.2000e+01, 1.3000e+01, 1.4000e+01, 1.5000e+01, 1.6000e+01],\n [1.2000e+02, 1.3000e+02, 1.4000e+02, 1.5000e+02, 1.6000e+02],\n [1.2000e+03, 1.3000e+03, 1.4000e+03, 1.5000e+03, 1.6000e+03]],\n \n [[1.4000e+01, 1.5000e+01, 1.6000e+01, 1.7000e+01, 1.8000e+01],\n [1.4000e+02, 1.5000e+02, 1.6000e+02, 1.7000e+02, 1.8000e+02],\n [1.4000e+03, 1.5000e+03, 1.6000e+03, 1.7000e+03, 1.8000e+03]]]),\n tensor([[[0.0000e+00, 1.0000e+00, 2.0000e+00, 3.0000e+00, 4.0000e+00],\n [0.0000e+00, 1.0000e+01, 2.0000e+01, 3.0000e+01, 4.0000e+01],\n [0.0000e+00, 1.0000e+02, 2.0000e+02, 3.0000e+02, 4.0000e+02]],\n \n [[2.0000e+00, 3.0000e+00, 4.0000e+00, 5.0000e+00, 6.0000e+00],\n [2.0000e+01, 3.0000e+01, 4.0000e+01, 5.0000e+01, 6.0000e+01],\n [2.0000e+02, 3.0000e+02, 4.0000e+02, 5.0000e+02, 6.0000e+02]],\n \n [[4.0000e+00, 5.0000e+00, 6.0000e+00, 7.0000e+00, 8.0000e+00],\n [4.0000e+01, 5.0000e+01, 6.0000e+01, 7.0000e+01, 8.0000e+01],\n 
[4.0000e+02, 5.0000e+02, 6.0000e+02, 7.0000e+02, 8.0000e+02]],\n \n [[6.0000e+00, 7.0000e+00, 8.0000e+00, 9.0000e+00, 1.0000e+01],\n [6.0000e+01, 7.0000e+01, 8.0000e+01, 9.0000e+01, 1.0000e+02],\n [6.0000e+02, 7.0000e+02, 8.0000e+02, 9.0000e+02, 1.0000e+03]],\n \n [[8.0000e+00, 9.0000e+00, 1.0000e+01, 1.1000e+01, 1.2000e+01],\n [8.0000e+01, 9.0000e+01, 1.0000e+02, 1.1000e+02, 1.2000e+02],\n [8.0000e+02, 9.0000e+02, 1.0000e+03, 1.1000e+03, 1.2000e+03]],\n \n [[1.0000e+01, 1.1000e+01, 1.2000e+01, 1.3000e+01, 1.4000e+01],\n [1.0000e+02, 1.1000e+02, 1.2000e+02, 1.3000e+02, 1.4000e+02],\n [1.0000e+03, 1.1000e+03, 1.2000e+03, 1.3000e+03, 1.4000e+03]],\n \n [[1.2000e+01, 1.3000e+01, 1.4000e+01, 1.5000e+01, 1.6000e+01],\n [1.2000e+02, 1.3000e+02, 1.4000e+02, 1.5000e+02, 1.6000e+02],\n [1.2000e+03, 1.3000e+03, 1.4000e+03, 1.5000e+03, 1.6000e+03]],\n \n [[1.4000e+01, 1.5000e+01, 1.6000e+01, 1.7000e+01, 1.8000e+01],\n [1.4000e+02, 1.5000e+02, 1.6000e+02, 1.7000e+02, 1.8000e+02],\n [1.4000e+03, 1.5000e+03, 1.6000e+03, 1.7000e+03, 1.8000e+03]]]))", + "crumbs": [ + "Data", + "Unwindowed datasets" + ] + }, + { + "objectID": "optimizer.html", + "href": "optimizer.html", + "title": "Optimizers", + "section": "", + "text": "This contains a set of optimizers.\n\n\nsource\n\nwrap_optimizer\n\n wrap_optimizer (opt, **kwargs)\n\nYou can natively use any of the optimizers included in the fastai library. You just need to pass it to the learner as the opt_func.\nIn addition, you will be able to use any of the optimizers from:\n\nPytorch\ntorch_optimizer (https://github.com/jettify/pytorch-optimizer; in this case, you will need to install torch-optimizer first)\n\nIf you want to use optimizers from either of these two libraries, you need to wrap them with the wrap_optimizer function. Here are a few examples:\nadamw = wrap_optimizer(torch.optim.AdamW)\nimport torch_optimizer as optim\nadabelief = wrap_optimizer(optim.AdaBelief)", + "crumbs": [ + "Training", + "Optimizers" + ] + }, + { + "objectID": "losses.html", + "href": "losses.html", + "title": "Losses", + "section": "", + "text": "Losses not available in fastai or Pytorch.\n\n\nsource\n\nHuberLoss\n\n HuberLoss (reduction='mean', delta=1.0)\n\nHuber loss\nCreates a criterion that uses a squared term if the absolute element-wise error falls below delta and a delta-scaled L1 term otherwise. This loss combines advantages of both :class:L1Loss and :class:MSELoss; the delta-scaled L1 region makes the loss less sensitive to outliers than :class:MSELoss, while the L2 region provides smoothness over :class:L1Loss near 0. See Huber loss <https://en.wikipedia.org/wiki/Huber_loss>_ for more information. This loss is equivalent to nn.SmoothL1Loss when delta == 1.\n\nsource\n\n\nLogCoshLoss\n\n LogCoshLoss (reduction='mean', delta=1.0)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. 
note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\ninp = torch.rand(8, 3, 10)\ntarg = torch.randn(8, 3, 10)\ntest_close(HuberLoss(delta=1)(inp, targ), nn.SmoothL1Loss()(inp, targ))\nLogCoshLoss()(inp, targ)\n\ntensor(0.4588)\n\n\n\nsource\n\n\nMaskedLossWrapper\n\n MaskedLossWrapper (crit)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\ninp = torch.rand(8, 3, 10)\ntarg = torch.randn(8, 3, 10)\ntarg[targ >.8] = np.nan\nnn.L1Loss()(inp, targ), MaskedLossWrapper(nn.L1Loss())(inp, targ)\n\n(tensor(nan), tensor(1.0520))\n\n\n\nsource\n\n\nCenterPlusLoss\n\n CenterPlusLoss (loss, c_out, λ=0.01, logits_dim=None)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nsource\n\n\nCenterLoss\n\n CenterLoss (c_out, logits_dim=None)\n\nCode in Pytorch has been slightly modified from: https://github.com/KaiyangZhou/pytorch-center-loss/blob/master/center_loss.py Based on paper: Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.\nArgs: c_out (int): number of classes. logits_dim (int): dim 1 of the logits. By default same as c_out (for one hot encoded logits)\n\nc_in = 10\nx = torch.rand(64, c_in).to(device=default_device())\nx = F.softmax(x, dim=1)\nlabel = x.max(dim=1).indices\nCenterLoss(c_in).to(x.device)(x, label), CenterPlusLoss(LabelSmoothingCrossEntropyFlat(), c_in).to(x.device)(x, label)\n\n(tensor(9.2481, grad_fn=<DivBackward0>),\n TensorBase(2.3559, grad_fn=<AliasBackward0>))\n\n\n\nCenterPlusLoss(LabelSmoothingCrossEntropyFlat(), c_in)\n\nCenterPlusLoss(loss=FlattenedLoss of LabelSmoothingCrossEntropy(), c_out=10, λ=0.01)\n\n\n\nsource\n\n\nFocalLoss\n\n FocalLoss (alpha:Optional[torch.Tensor]=None, gamma:float=2.0,\n reduction:str='mean')\n\nWeighted, multiclass focal loss\n\ninputs = torch.normal(0, 2, (16, 2)).to(device=default_device())\ntargets = torch.randint(0, 2, (16,)).to(device=default_device())\nFocalLoss()(inputs, targets)\n\ntensor(0.9829)\n\n\n\nsource\n\n\nTweedieLoss\n\n TweedieLoss (p=1.5, eps=1e-08)\n\nSame as nn.Module, but no need for subclasses to call super().__init__\n\nc_in = 10\noutput = torch.rand(64).to(device=default_device())\ntarget = torch.rand(64).to(device=default_device())\nTweedieLoss().to(output.device)(output, target)\n\ntensor(3.0539)", + "crumbs": [ + "Training", + "Losses" + ] + }, + { + "objectID": "data.preprocessing.html", + "href": "data.preprocessing.html", + "title": "Data preprocessing", + "section": "", + "text": "Functions used to preprocess time series (both X and y).\nfrom tsai.data.external import get_UCR_data\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, return_split=False)\ntfms = [None, Categorize()]\ndsets = TSDatasets(X, y, tfms=tfms, splits=splits)\nsource", + "crumbs": [ + "Data", + "Data preprocessing" + ] + }, + { + "objectID": "data.preprocessing.html#y-transforms", + "href": "data.preprocessing.html#y-transforms", + "title": "Data preprocessing", + "section": "y transforms", + "text": "y transforms\n\nsource\n\nPreprocessor\n\n Preprocessor (preprocessor, **kwargs)\n\nInitialize self. 
See help(type(self)) for accurate signature.\n\n# Standardize\nfrom tsai.data.validation import TimeSplitter\n\n\ny = random_shuffle(np.random.randn(1000) * 10 + 5)\nsplits = TimeSplitter()(y)\npreprocessor = Preprocessor(StandardScaler)\npreprocessor.fit(y[splits[0]])\ny_tfm = preprocessor.transform(y)\ntest_close(preprocessor.inverse_transform(y_tfm), y)\nplt.hist(y, 50, label='ori',)\nplt.hist(y_tfm, 50, label='tfm')\nplt.legend(loc='best')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# RobustScaler\ny = random_shuffle(np.random.randn(1000) * 10 + 5)\nsplits = TimeSplitter()(y)\npreprocessor = Preprocessor(RobustScaler)\npreprocessor.fit(y[splits[0]])\ny_tfm = preprocessor.transform(y)\ntest_close(preprocessor.inverse_transform(y_tfm), y)\nplt.hist(y, 50, label='ori',)\nplt.hist(y_tfm, 50, label='tfm')\nplt.legend(loc='best')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Normalize\ny = random_shuffle(np.random.rand(1000) * 3 + .5)\nsplits = TimeSplitter()(y)\npreprocessor = Preprocessor(Normalizer)\npreprocessor.fit(y[splits[0]])\ny_tfm = preprocessor.transform(y)\ntest_close(preprocessor.inverse_transform(y_tfm), y)\nplt.hist(y, 50, label='ori',)\nplt.hist(y_tfm, 50, label='tfm')\nplt.legend(loc='best')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# BoxCox\ny = random_shuffle(np.random.rand(1000) * 10 + 5)\nsplits = TimeSplitter()(y)\npreprocessor = Preprocessor(BoxCox)\npreprocessor.fit(y[splits[0]])\ny_tfm = preprocessor.transform(y)\ntest_close(preprocessor.inverse_transform(y_tfm), y)\nplt.hist(y, 50, label='ori',)\nplt.hist(y_tfm, 50, label='tfm')\nplt.legend(loc='best')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# YeoJohnshon\ny = random_shuffle(np.random.randn(1000) * 10 + 5)\ny = np.random.beta(.5, .5, size=1000)\nsplits = TimeSplitter()(y)\npreprocessor = Preprocessor(YeoJohnshon)\npreprocessor.fit(y[splits[0]])\ny_tfm = preprocessor.transform(y)\ntest_close(preprocessor.inverse_transform(y_tfm), y)\nplt.hist(y, 50, label='ori',)\nplt.hist(y_tfm, 50, label='tfm')\nplt.legend(loc='best')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# QuantileTransformer\ny = - np.random.beta(1, .5, 10000) * 10\nsplits = TimeSplitter()(y)\npreprocessor = Preprocessor(Quantile)\npreprocessor.fit(y[splits[0]])\nplt.hist(y, 50, label='ori',)\ny_tfm = preprocessor.transform(y)\nplt.legend(loc='best')\nplt.show()\nplt.hist(y_tfm, 50, label='tfm')\nplt.legend(loc='best')\nplt.show()\ntest_close(preprocessor.inverse_transform(y_tfm), y, 1e-1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nsource\n\n\nReLabeler\n\n ReLabeler (cm)\n\nChanges the labels in a dataset based on a dictionary (class mapping) Args: cm = class mapping dictionary\n\nvals = {0:'a', 1:'b', 2:'c', 3:'d', 4:'e'}\ny = np.array([vals[i] for i in np.random.randint(0, 5, 20)])\nlabeler = ReLabeler(dict(a='x', b='x', c='y', d='z', e='z'))\ny_new = labeler(y)\ntest_eq(y.shape, y_new.shape)\ny, y_new\n\n(array(['d', 'd', 'a', 'd', 'b', 'e', 'a', 'd', 'b', 'c', 'b', 'e', 'b',\n 'b', 'a', 'e', 'd', 'e', 'c', 'e'], dtype='<U1'),\n array(['z', 'z', 'x', 'z', 'x', 'z', 'x', 'z', 'x', 'y', 'x', 'z', 'x',\n 'x', 'x', 'z', 'z', 'z', 'y', 'z'], dtype='<U1'))", + "crumbs": [ + "Data", + "Data preprocessing" + ] + }, + { + "objectID": "learner.html", + "href": "learner.html", + "title": "Learner", + "section": "", + "text": "fastai Learner extensions.\n\n\nsource\n\nLearner.show_batch\n\n Learner.show_batch (**kwargs)\n\n\nsource\n\n\nLearner.remove_all_cbs\n\n Learner.remove_all_cbs (max_iters=10)\n\n\nsource\n\n\nLearner.one_batch\n\n 
Learner.one_batch (i, b)\n\n\nsource\n\n\nLearner.inverse_transform\n\n Learner.inverse_transform (df:pandas.core.frame.DataFrame)\n\nApplies sklearn-type pipeline inverse transforms\n\nsource\n\n\nLearner.transform\n\n Learner.transform (df:pandas.core.frame.DataFrame)\n\nApplies sklearn-type pipeline transforms\n⚠️ Important: save_all and load_all methods are designed for small datasets only. If you are using a larger dataset, you should use the standard save and load_learner methods.\n\nsource\n\n\nload_all\n\n load_all (path='export', dls_fname='dls', model_fname='model',\n learner_fname='learner', device=None, pickle_module=<module\n 'pickle' from '/opt/hostedtoolcache/Python/3.9.18/x64/lib/pytho\n n3.9/pickle.py'>, verbose=False)\n\n\nsource\n\n\nLearner.save_all\n\n Learner.save_all (path='export', dls_fname='dls', model_fname='model',\n learner_fname='learner', verbose=False)\n\n\nfrom tsai.data.core import get_ts_dls\nfrom tsai.utils import remove_dir\n\n\nX = np.random.rand(100, 2, 10)\ndls = get_ts_dls(X)\nlearn = Learner(dls, InceptionTimePlus(2, 1), loss_func=MSELossFlat())\nlearn.save_all(Path.home()/'tmp', verbose=True)\nlearn2 = load_all(Path.home()/'tmp', verbose=True)\nremove_dir(Path.home()/'tmp')\n\nLearner saved:\npath = '/Users/nacho/tmp'\ndls_fname = '['dls_0.pth', 'dls_1.pth']'\nmodel_fname = 'model.pth'\nlearner_fname = 'learner.pkl'\nLearner loaded:\npath = '/Users/nacho/tmp'\ndls_fname = '['dls_0.pth', 'dls_1.pth']'\nmodel_fname = 'model.pth'\nlearner_fname = 'learner.pkl'\n/Users/nacho/tmp directory removed.\n\n\n\nsource\n\n\nLearner.plot_metrics\n\n Learner.plot_metrics (nrows:int=1, ncols:int=1, figsize:tuple=None,\n imsize:int=3, suptitle:str=None, sharex:Union[bool,\n Literal['none','all','row','col']]=False, sharey:Un\n ion[bool,Literal['none','all','row','col']]=False,\n squeeze:bool=True,\n width_ratios:Optional[Sequence[float]]=None,\n height_ratios:Optional[Sequence[float]]=None,\n subplot_kw:Optional[dict[str,Any]]=None,\n gridspec_kw:Optional[dict[str,Any]]=None)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nnrows\nint\n1\nNumber of rows in returned axes grid\n\n\nncols\nint\n1\nNumber of columns in returned axes grid\n\n\nfigsize\ntuple\nNone\nWidth, height in inches of the returned figure\n\n\nimsize\nint\n3\nSize (in inches) of images that will be displayed in the returned figure\n\n\nsuptitle\nstr\nNone\nTitle to be set to returned figure\n\n\nsharex\nbool | Literal[‘none’, ‘all’, ‘row’, ‘col’]\nFalse\n\n\n\nsharey\nbool | Literal[‘none’, ‘all’, ‘row’, ‘col’]\nFalse\n\n\n\nsqueeze\nbool\nTrue\n\n\n\nwidth_ratios\nSequence[float] | None\nNone\n\n\n\nheight_ratios\nSequence[float] | None\nNone\n\n\n\nsubplot_kw\ndict[str, Any] | None\nNone\n\n\n\ngridspec_kw\ndict[str, Any] | None\nNone\n\n\n\nReturns\n(plt.Figure, plt.Axes)\n\nReturns both fig and ax as a tuple\n\n\n\n\nsource\n\n\nRecorder.plot_metrics\n\n Recorder.plot_metrics (nrows=None, ncols=None, figsize=None,\n final_losses=True, perc=0.5, imsize:int=3,\n suptitle:str=None, sharex:Union[bool,Literal['none\n ','all','row','col']]=False, sharey:Union[bool,Lit\n eral['none','all','row','col']]=False,\n squeeze:bool=True,\n width_ratios:Optional[Sequence[float]]=None,\n height_ratios:Optional[Sequence[float]]=None,\n subplot_kw:Optional[dict[str,Any]]=None,\n gridspec_kw:Optional[dict[str,Any]]=None)\n\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nnrows\nint\n1\nNumber of rows in returned axes grid\n\n\nncols\nint\n1\nNumber of columns in returned axes 
grid\n\n\nfigsize\ntuple\nNone\nWidth, height in inches of the returned figure\n\n\nfinal_losses\nbool\nTrue\n\n\n\nperc\nfloat\n0.5\n\n\n\nimsize\nint\n3\nSize (in inches) of images that will be displayed in the returned figure\n\n\nsuptitle\nstr\nNone\nTitle to be set to returned figure\n\n\nsharex\nbool | Literal[‘none’, ‘all’, ‘row’, ‘col’]\nFalse\n\n\n\nsharey\nbool | Literal[‘none’, ‘all’, ‘row’, ‘col’]\nFalse\n\n\n\nsqueeze\nbool\nTrue\n\n\n\nwidth_ratios\nSequence[float] | None\nNone\n\n\n\nheight_ratios\nSequence[float] | None\nNone\n\n\n\nsubplot_kw\ndict[str, Any] | None\nNone\n\n\n\ngridspec_kw\ndict[str, Any] | None\nNone\n\n\n\nReturns\n(plt.Figure, plt.Axes)\n\nReturns both fig and ax as a tuple\n\n\n\n\nsource\n\n\nget_arch\n\n get_arch (arch_name)\n\n\nfor arch_name in all_arch_names:\n get_arch(arch_name)\n\n\nsource\n\n\nts_learner\n\n ts_learner (dls, arch=None, c_in=None, c_out=None, seq_len=None, d=None,\n s_cat_idxs=None, s_cat_embeddings=None,\n s_cat_embedding_dims=None, s_cont_idxs=None, o_cat_idxs=None,\n o_cat_embeddings=None, o_cat_embedding_dims=None,\n o_cont_idxs=None, splitter=<function trainable_params>,\n loss_func=None, opt_func=<function Adam>, lr=0.001, cbs=None,\n metrics=None, path=None, model_dir='models', wd=None,\n wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85, 0.95),\n train_metrics=False, valid_metrics=True, seed=None,\n device=None, verbose=False, patch_len=None,\n patch_stride=None, fusion_layers=128, fusion_act='relu',\n fusion_dropout=0.0, fusion_use_bn=True, pretrained=False,\n weights_path=None, exclude_head=True, cut=-1, init=None,\n arch_config={})\n\n\nsource\n\n\ntsimage_learner\n\n tsimage_learner (dls, arch=None, pretrained=False, loss_func=None,\n opt_func=<function Adam>, lr=0.001, cbs=None,\n metrics=None, path=None, model_dir='models', wd=None,\n wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85,\n 0.95), c_in=None, c_out=None, device=None,\n verbose=False, init=None, arch_config={})\n\n\nsource\n\n\nLearner.decoder\n\n Learner.decoder (o)\n\n\nfrom tsai.data.core import *\nfrom tsai.data.external import get_UCR_data\nfrom tsai.models.FCNPlus import FCNPlus\n\n\nX, y, splits = get_UCR_data('OliveOil', verbose=True, split_data=False)\ntfms = [None, [TSCategorize()]]\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms)\nlearn = ts_learner(dls, FCNPlus)\nfor p in learn.model.parameters():\n p.requires_grad=False\ntest_eq(count_parameters(learn.model), 0)\nlearn.freeze()\ntest_eq(count_parameters(learn.model), 1540)\nlearn.unfreeze()\ntest_eq(count_parameters(learn.model), 264580)\n\nlearn = ts_learner(dls, 'FCNPlus')\nfor p in learn.model.parameters():\n p.requires_grad=False\ntest_eq(count_parameters(learn.model), 0)\nlearn.freeze()\ntest_eq(count_parameters(learn.model), 1540)\nlearn.unfreeze()\ntest_eq(count_parameters(learn.model), 264580)\n\nDataset: OliveOil\nX : (60, 1, 570)\ny : (60,)\nsplits : (#30) [0,1,2,3,4,5,6,7,8,9...] (#30) [30,31,32,33,34,35,36,37,38,39...] 
\n\n\n\n\nlearn.show_batch();\n\n\n\n\n\n\n\n\n\nfrom fastai.metrics import accuracy\nfrom tsai.data.preprocessing import TSRobustScale\n\n\nX, y, splits = get_UCR_data('OliveOil', split_data=False)\ntfms = [None, TSClassification()]\nbatch_tfms = TSRobustScale()\ndls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms)\nlearn = ts_learner(dls, FCNPlus, metrics=accuracy, train_metrics=True)\nlearn.fit_one_cycle(2)\nlearn.plot_metrics()\n\n\n\n\n\n\n\n\nepoch\ntrain_loss\ntrain_accuracy\nvalid_loss\nvalid_accuracy\ntime\n\n\n\n\n0\n1.480875\n0.266667\n1.390461\n0.300000\n00:02\n\n\n1\n1.476655\n0.266667\n1.387370\n0.300000\n00:01\n\n\n\n\n\n\n\n\n\n\n\n\n\nif not os.path.exists(\"./models\"): os.mkdir(\"./models\")\nif not os.path.exists(\"./data\"): os.mkdir(\"./data\")\nnp.save(\"data/X_test.npy\", X[splits[1]])\nnp.save(\"data/y_test.npy\", y[splits[1]])\nlearn.export(\"./models/test.pth\")", + "crumbs": [ + "Training", + "Learner" + ] + }, + { + "objectID": "export.html", + "href": "export.html", + "title": "nb2py", + "section": "", + "text": "nb2py will allow you to convert the notebook (.ipynb) where the function is executed to a python script.\n\nThe conversion applies these rules:\n\nThe notebook will be automatically saved when the function is executed.\nOnly code cells will be converted (not markdown cells).\nA header will be added to indicate the script has been automatically generated. It also indicates where the original ipynb is.\nCells with a #hide flag won’t be converted. Flag variants like # hide, #Hide, #HIDE, … are also acceptable.\nEmpty cells and unnecessary empty lines within cells will be removed.\nBy default, the script will be created with the same name and in the same folder as the original notebook, but you can pass a different folder and a different name if you wish.\nIf a script with the same name already exists, it will be overwritten.\n\nThis code is required to identify flags in the notebook. We are looking for #hide flags.\nThis code automatically gets the name of the notebook. It’s been tested to work on Jupyter notebooks, Jupyter Lab and Google Colab.\n\nsource\n\nget_script_path\n\n get_script_path (nb_name=None)\n\n\nsource\n\n\nnb_name_to_py\n\n nb_name_to_py (nb_name)\n\n\nsource\n\n\nget_nb_path\n\n get_nb_path ()\n\nReturns the absolute path of the notebook, or raises a FileNotFoundError exception if it cannot be determined.\n\nsource\n\n\nget_colab_nb_name\n\n get_colab_nb_name ()\n\n\nsource\n\n\nget_nb_name\n\n get_nb_name (d=None)\n\nReturns the short name of the notebook w/o the .ipynb extension, or raises a FileNotFoundError exception if it cannot be determined.\nThis code is used when trying to save a file to Google Drive. We first need to mount the drive. 
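A minimal sketch of that mount step (assuming the code runs inside Google Colab, where the standard google.colab helper is available; it is not needed in a local Jupyter setup):\nfrom google.colab import drive\ndrive.mount('/content/drive')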
\n\nsource\n\n\nnb2py\n\n nb2py (nb:str=None, folder:str=None, name:str=None, save=True, run=False,\n verbose=True)\n\nConverts a notebook to a python script in a predefined folder.\nnb: absolute or relative full path to the notebook you want to convert to a python script.\nfolder: absolute or relative path to the folder of the script you will create. Defaults to the current nb’s directory.\nname: name of the script you want to create. Defaults to the current nb name, with .ipynb replaced by .py.\nsave: saves the nb before converting it to a script.\nrun: import and run the script.\nverbose: controls verbosity.\n\nif not is_colab():\n nb = None\n folder = None\n name = None\n pyname = nb2py(nb=nb, folder=folder, name=name)\n if pyname is not None: \n assert os.path.isfile(pyname)\n os.remove(pyname)\n assert not os.path.isfile(pyname)\n\n nb = '001_export.ipynb'\n folder = None\n name = None\n pyname = nb2py(nb=nb, folder=folder, name=name)\n if pyname is not None: \n assert os.path.isfile(pyname)\n os.remove(pyname)\n assert not os.path.isfile(pyname)\n\n nb = '../nbs/001_export'\n folder = None\n name = None\n pyname = nb2py(nb=nb, folder=folder, name=name)\n if pyname is not None: \n assert os.path.isfile(pyname)\n os.remove(pyname)\n assert not os.path.isfile(pyname)\n\n nb = None\n folder = '../test_export/'\n name = None\n pyname = nb2py(nb=nb, folder=folder, name=name)\n if pyname is not None: \n assert os.path.isfile(pyname)\n shutil.rmtree(folder)\n assert not os.path.isfile(pyname)\n\nnb2py couldn't get the nb name. Pass it as an nb argument and rerun nb2py.\n001_export.ipynb converted to /Users/nacho/notebooks/tsai/nbs/001_export.py\n001_export.ipynb converted to /Users/nacho/notebooks/tsai/nbs/../nbs/001_export.py\nnb2py couldn't get the nb name. Pass it as an nb argument and rerun nb2py." + }, + { + "objectID": "models.rnnplus.html", + "href": "models.rnnplus.html", + "title": "RNNPlus", + "section": "", + "text": "These are RNN, LSTM and GRU PyTorch implementations created by Ignacio Oguiza - oguiza@timeseriesAI.co\nThe idea of including a feature extractor in the RNN network comes from the solution developed by the UPSTAGE team (https://www.kaggle.com/songwonho, https://www.kaggle.com/limerobot and https://www.kaggle.com/jungikhyo). They finished in 3rd position in Kaggle’s Google Brain - Ventilator Pressure Prediction competition. 
They used a Conv1d + Stacked LSTM architecture.\nsource", + "crumbs": [ + "Models", + "RNNs", + "RNNPlus" + ] + }, + { + "objectID": "models.rnnplus.html#converting-a-model-to-torchscript", + "href": "models.rnnplus.html#converting-a-model-to-torchscript", + "title": "RNNPlus", + "section": "Converting a model to TorchScript", + "text": "Converting a model to TorchScript\n\nmodel = GRUPlus(c_in, c_out, hidden_size=100, n_layers=2, bidirectional=True, rnn_dropout=.5, fc_dropout=.5)\nmodel.eval()\ninp = torch.rand(1, c_in, 50)\noutput = model(inp)\nprint(output)\n\ntensor([[-0.0677, -0.0857]], grad_fn=<AddmmBackward0>)\n\n\n\nTracing\n\n# save to gpu, cpu or both\ntraced_cpu = torch.jit.trace(model.cpu(), inp)\nprint(traced_cpu)\ntorch.jit.save(traced_cpu, \"cpu.pt\")\n\n# load cpu or gpu model\ntraced_cpu = torch.jit.load(\"cpu.pt\")\ntest_eq(traced_cpu(inp), output)\n\n!rm \"cpu.pt\"\n\nGRUPlus(\n original_name=GRUPlus\n (backbone): _RNN_Backbone(\n original_name=_RNN_Backbone\n (to_cat_embed): Identity(original_name=Identity)\n (feature_extractor): Identity(original_name=Identity)\n (rnn): Sequential(\n original_name=Sequential\n (0): GRU(original_name=GRU)\n (1): LSTMOutput(original_name=LSTMOutput)\n )\n (transpose): Transpose(original_name=Transpose)\n )\n (head): Sequential(\n original_name=Sequential\n (0): LastStep(original_name=LastStep)\n (1): Dropout(original_name=Dropout)\n (2): Linear(original_name=Linear)\n )\n)", + "crumbs": [ + "Models", + "RNNs", + "RNNPlus" + ] + }, + { + "objectID": "models.rnnplus.html#converting-a-model-to-onnx", + "href": "models.rnnplus.html#converting-a-model-to-onnx", + "title": "RNNPlus", + "section": "Converting a model to ONNX", + "text": "Converting a model to ONNX\nimport onnx\n\ntorch.onnx.export(model.cpu(), # model being run\n inp, # model input (or a tuple for multiple inputs)\n \"cpu.onnx\", # where to save the model (can be a file or file-like object)\n export_params=True, # store the trained parameter weights inside the model file\n verbose=False,\n opset_version=13, # the ONNX version to export the model to\n do_constant_folding=True, # whether to execute constant folding for optimization\n input_names = ['input'], # the model's input names\n output_names = ['output'], # the model's output names\n dynamic_axes={\n 'input' : {0 : 'batch_size'}, \n 'output' : {0 : 'batch_size'}} # variable length axes\n )\n\n\nonnx_model = onnx.load(\"cpu.onnx\") # Load the model and check it's ok\nonnx.checker.check_model(onnx_model)\nimport onnxruntime as ort\n\nort_sess = ort.InferenceSession('cpu.onnx')\nout = ort_sess.run(None, {'input': inp.numpy()})\n\ninput_name = ort_sess.get_inputs()[0].name\noutput_name = ort_sess.get_outputs()[0].name\ninput_dims = ort_sess.get_inputs()[0].shape\n\ntest_close(out, output.detach().numpy())\n!rm \"cpu.onnx\"", + "crumbs": [ + "Models", + "RNNs", + "RNNPlus" + ] + }, + { + "objectID": "models.convtranplus.html", + "href": "models.convtranplus.html", + "title": "ConvTranPlus", + "section": "", + "text": "ConvTran: Improving Position Encoding of Transformers for Multivariate Time Series Classification\n\nThis is a Pytorch implementation of ConvTran adapted by Ignacio Oguiza and based on:\nFoumani, N. M., Tan, C. W., Webb, G. I., & Salehi, M. (2023). Improving Position Encoding of Transformers for Multivariate Time Series Classification. 
arXiv preprint arXiv:2305.16642.\nPre-print: https://arxiv.org/abs/2305.16642v1\nOriginal repository: https://github.com/Navidfoumani/ConvTran\n\nsource\n\ntAPE\n\n tAPE (d_model:int, seq_len=1024, dropout:float=0.1, scale_factor=1.0)\n\ntime Absolute Position Encoding\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nd_model\nint\n\nthe embedding dimension\n\n\nseq_len\nint\n1024\nthe max. length of the incoming sequence\n\n\ndropout\nfloat\n0.1\ndropout value\n\n\nscale_factor\nfloat\n1.0\n\n\n\n\n\nt = torch.randn(8, 50, 128)\nassert tAPE(128, 50)(t).shape == t.shape\n\n\nsource\n\n\nAbsolutePositionalEncoding\n\n AbsolutePositionalEncoding (d_model:int, seq_len=1024, dropout:float=0.1,\n scale_factor=1.0)\n\nAbsolute positional encoding\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nd_model\nint\n\nthe embedding dimension\n\n\nseq_len\nint\n1024\nthe max. length of the incoming sequence\n\n\ndropout\nfloat\n0.1\ndropout value\n\n\nscale_factor\nfloat\n1.0\n\n\n\n\n\nt = torch.randn(8, 50, 128)\nassert AbsolutePositionalEncoding(128, 50)(t).shape == t.shape\n\n\nsource\n\n\nLearnablePositionalEncoding\n\n LearnablePositionalEncoding (d_model:int, seq_len=1024,\n dropout:float=0.1)\n\nLearnable positional encoding\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nd_model\nint\n\nthe embedding dimension\n\n\nseq_len\nint\n1024\nthe max. length of the incoming sequence\n\n\ndropout\nfloat\n0.1\ndropout value\n\n\n\n\nt = torch.randn(8, 50, 128)\nassert LearnablePositionalEncoding(128, 50)(t).shape == t.shape\n\n\nsource\n\n\nAttention\n\n Attention (d_model:int, n_heads:int=8, dropout:float=0.01)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n\nivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\n\n\nType\nDefault\nDetails\n\n\n\n\nd_model\nint\n\nEmbedding dimension\n\n\nn_heads\nint\n8\nnumber of attention heads\n\n\ndropout\nfloat\n0.01\ndropout\n\n\n\n\nt = torch.randn(8, 50, 128)\nassert Attention(128)(t).shape == t.shape\n\n\nsource\n\n\nAttention_Rel_Scl\n\n Attention_Rel_Scl (d_model:int, seq_len:int, n_heads:int=8,\n dropout:float=0.01)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. 
note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n\nivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\n\n\nType\nDefault\nDetails\n\n\n\n\nd_model\nint\n\nEmbedding dimension\n\n\nseq_len\nint\n\nsequence length\n\n\nn_heads\nint\n8\nnumber of attention heads\n\n\ndropout\nfloat\n0.01\ndropout\n\n\n\n\nt = torch.randn(8, 50, 128)\nassert Attention_Rel_Scl(128, 50)(t).shape == t.shape\n\n\nsource\n\n\nAttention_Rel_Vec\n\n Attention_Rel_Vec (d_model:int, seq_len:int, n_heads:int=8,\n dropout:float=0.01)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n\nivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\n\n\nType\nDefault\nDetails\n\n\n\n\nd_model\nint\n\nEmbedding dimension\n\n\nseq_len\nint\n\nsequence length\n\n\nn_heads\nint\n8\nnumber of attention heads\n\n\ndropout\nfloat\n0.01\ndropout\n\n\n\n\nt = torch.randn(8, 50, 128)\nassert Attention_Rel_Vec(128, 50)(t).shape == t.shape\n\n\nsource\n\n\nConvTranBackbone\n\n ConvTranBackbone (c_in:int, seq_len:int, d_model=16, n_heads:int=8,\n dim_ff:int=256, abs_pos_encode:str='tAPE',\n rel_pos_encode:str='eRPE', dropout:float=0.01)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n\nivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\n\n\n\nseq_len\nint\n\n\n\n\nd_model\nint\n16\nInternal dimension of transformer embeddings\n\n\nn_heads\nint\n8\nNumber of multi-headed attention heads\n\n\ndim_ff\nint\n256\nDimension of dense feedforward part of transformer layer\n\n\nabs_pos_encode\nstr\ntAPE\nAbsolute Position Embedding. choices={‘tAPE’, ‘sin’, ‘learned’, None}\n\n\nrel_pos_encode\nstr\neRPE\nRelative Position Embedding. 
choices={‘eRPE’, ‘vector’, None}\n\n\ndropout\nfloat\n0.01\nDroupout regularization ratio\n\n\n\n\nt = torch.randn(8, 5, 20)\nassert ConvTranBackbone(5, 20)(t).shape, (8, 16, 20)\n\n\nsource\n\n\nConvTranPlus\n\n ConvTranPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,\n d_model:int=16, n_heads:int=8, dim_ff:int=256,\n abs_pos_encode:str='tAPE', rel_pos_encode:str='eRPE',\n encoder_dropout:float=0.01, fc_dropout:float=0.1,\n use_bn:bool=True, flatten:bool=True, custom_head:Any=None)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nNumber of channels in input\n\n\nc_out\nint\n\nNumber of channels in output\n\n\nseq_len\nint\n\nNumber of input sequence length\n\n\nd\ntuple\nNone\noutput shape (excluding batch dimension).\n\n\nd_model\nint\n16\nInternal dimension of transformer embeddings\n\n\nn_heads\nint\n8\nNumber of multi-headed attention heads\n\n\ndim_ff\nint\n256\nDimension of dense feedforward part of transformer layer\n\n\nabs_pos_encode\nstr\ntAPE\nAbsolute Position Embedding. choices={‘tAPE’, ‘sin’, ‘learned’, None}\n\n\nrel_pos_encode\nstr\neRPE\nRelative Position Embedding. 
choices={‘eRPE’, ‘vector’, None}\n\n\nencoder_dropout\nfloat\n0.01\nDropout regularization ratio for the encoder\n\n\nfc_dropout\nfloat\n0.1\nDropout regularization ratio for the head\n\n\nuse_bn\nbool\nTrue\nindicates if batchnorm will be applied to the model head.\n\n\nflatten\nbool\nTrue\nthis will flatten the output of the encoder before applying the head if True.\n\n\ncustom_head\ntyping.Any\nNone\ncustom head that will be applied to the model head (optional).\n\n\n\n\nxb = torch.randn(16, 5, 20)\n\nmodel = ConvTranPlus(5, 3, 20, d=None)\noutput = model(xb)\nassert output.shape == (16, 3)\n\n\nxb = torch.randn(16, 5, 20)\n\nmodel = ConvTranPlus(5, 3, 20, d=5)\noutput = model(xb)\nassert output.shape == (16, 5, 3)\n\n\nxb = torch.randn(16, 5, 20)\n\nmodel = ConvTranPlus(5, 3, 20, d=(2, 10))\noutput = model(xb)\nassert output.shape == (16, 2, 10, 3)", + "crumbs": [ + "Models", + "Hybrid models", + "ConvTranPlus" + ] + }, + { + "objectID": "models.minirocketplus_pytorch.html", + "href": "models.minirocketplus_pytorch.html", + "title": "MINIROCKETPlus Pytorch", + "section": "", + "text": "This is a modified version of the MiniRocket PyTorch implementation originally developed by Malcolm McLean and Ignacio Oguiza, based on:\nDempster, A., Schmidt, D. F., & Webb, G. I. (2020). MINIROCKET: A Very Fast (Almost) Deterministic Transform for Time Series Classification. arXiv preprint arXiv:2012.08791.\nOriginal paper: https://arxiv.org/abs/2012.08791\nOriginal code: https://github.com/angus924/minirocket\n\nsource\n\nMiniRocketFeaturesPlus\n\n MiniRocketFeaturesPlus (c_in, seq_len, num_features=10000,\n max_dilations_per_kernel=32, kernel_size=9,\n max_num_channels=9, max_num_kernels=84,\n add_lsaz=False)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nMiniRocketPlus\n\n MiniRocketPlus (c_in, c_out, seq_len, num_features=10000,\n max_dilations_per_kernel=32, kernel_size=9,\n max_num_channels=None, max_num_kernels=84, bn=True,\n fc_dropout=0, add_lsaz=False, custom_head=None,\n zero_init=True)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nsource\n\n\nFlatten\n\n Flatten (*args, **kwargs)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nget_minirocket_features\n\n get_minirocket_features (o, model, chunksize=1024, use_cuda=None,\n to_np=False)\n\nFunction used to split a large dataset into chunks, avoiding OOM error.\n\nsource\n\n\nMiniRocketHead\n\n MiniRocketHead (c_in, c_out, seq_len=1, bn=True, fc_dropout=0.0)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
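The chunking idea behind get_minirocket_features (described above) can be illustrated with a minimal sketch. This is not the library's actual implementation, and the helper name chunked_features is hypothetical: the input is sliced into fixed-size chunks, each chunk is passed through the already fitted feature module with gradients disabled, and the per-chunk outputs are concatenated on the CPU so that only one chunk occupies device memory at a time.

import torch

def chunked_features(X, model, chunksize=1024, device='cpu'):
    # Hypothetical sketch of chunked feature extraction (not tsai's actual code).
    # X: array-like of shape (n_samples, n_vars, seq_len); model: a fitted feature extractor.
    model = model.to(device).eval()
    outs = []
    with torch.no_grad():
        for i in range(0, len(X), chunksize):
            xb = torch.as_tensor(X[i:i + chunksize], dtype=torch.float32, device=device)
            outs.append(model(xb).cpu())  # move each chunk's features back to CPU right away
    return torch.cat(outs)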
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nfrom tsai.imports import default_device\nfrom fastai.metrics import accuracy\nfrom fastai.callback.tracker import ReduceLROnPlateau\nfrom tsai.data.all import *\nfrom tsai.learner import *\n\n\n# Offline feature calculation\ndsid = 'ECGFiveDays'\nX, y, splits = get_UCR_data(dsid, split_data=False)\nmrf = MiniRocketFeaturesPlus(c_in=X.shape[1], seq_len=X.shape[2]).to(default_device())\nX_train = X[splits[0]] # X_train may either be a np.ndarray or a torch.Tensor\nmrf.fit(X_train)\nX_tfm = get_minirocket_features(X, mrf).cpu().numpy()\ntfms = [None, TSClassification()]\nbatch_tfms = TSStandardize(by_var=True)\ndls = get_ts_dls(X_tfm, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=256)\nlearn = ts_learner(dls, MiniRocketHead, metrics=accuracy)\nlearn.fit(1, 1e-4, cbs=ReduceLROnPlateau(factor=0.5, min_lr=1e-8, patience=10))\n\n\n# Online feature calculation\ndsid = 'ECGFiveDays'\nX, y, splits = get_UCR_data(dsid, split_data=False)\ntfms = [None, TSClassification()]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=256)\nlearn = ts_learner(dls, MiniRocketPlus, kernel_size=7, metrics=accuracy)\nlearn.fit_one_cycle(1, 1e-2)\n\n\nfrom functools import partial\nfrom fastcore.test import *\nfrom tsai.models.utils import build_ts_model\nfrom tsai.models.layers import mlp_head, rocket_nd_head\n\n\nbs, c_in, seq_len = 8, 3, 50\nc_out = 2\nxb = torch.randn(bs, c_in, seq_len)\nmodel = build_ts_model(MiniRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel = build_ts_model(MiniRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len, add_lsaz=True)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel = build_ts_model(MiniRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len, 
custom_head=mlp_head)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\n\n\nX = np.random.rand(8, 10, 100)\ny = np.random.rand(8, 1, 100)\nsplits = TimeSplitter(show_plot=False)(y)\ntfms = [None, TSRegression()]\nbatch_tfms = TSStandardize(by_sample=True)\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\ncustom_head = partial(rocket_nd_head, d=dls.d)\nmodel = MiniRocketPlus(dls.vars, dls.c, dls.len, custom_head=custom_head)\nxb,yb = dls.one_batch()\ntest_eq(model.to(xb.device)(xb).shape[1:], y.shape[1:])\n\n\nX = np.random.rand(16, 10, 100)\ny = np.random.randint(0, 4, (16, 1, 100))\nsplits = TimeSplitter(show_plot=False)(y)\ntfms = [None, TSClassification()]\nbatch_tfms = TSStandardize(by_sample=True)\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\ncustom_head = partial(rocket_nd_head, d=dls.d)\nmodel = MiniRocketPlus(dls.vars, dls.c, dls.len, custom_head=custom_head)\nxb,yb = dls.one_batch()\ntest_eq(model.to(xb.device)(xb).shape[1:], y.shape[1:]+(4,))\n\n\nsource\n\n\nInceptionRocketFeaturesPlus\n\n InceptionRocketFeaturesPlus (c_in, seq_len, num_features=10000,\n max_dilations_per_kernel=32,\n kernel_sizes=array([3, 5, 7, 9]),\n max_num_channels=None, max_num_kernels=84,\n add_lsaz=True, same_n_feats_per_ks=False)\n\nBase class for all neural network modules.\nYour models should also subclass this class.\nModules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\nSubmodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.\n.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.\n:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool\n\nsource\n\n\nInceptionRocketPlus\n\n InceptionRocketPlus (c_in, c_out, seq_len, num_features=10000,\n max_dilations_per_kernel=32, kernel_sizes=[3, 5, 7,\n 9], max_num_channels=None, max_num_kernels=84,\n same_n_feats_per_ks=False, add_lsaz=False, bn=True,\n fc_dropout=0, custom_head=None, zero_init=True)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
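As a rough usage sketch (not part of the original notebook), InceptionRocketPlus can be trained end to end with the same high-level API used elsewhere on this page for MiniRocketPlus. The dataset, batch size and learning rate below are arbitrary choices, and the classes are assumed to be importable via from tsai.all import *:

from tsai.all import *  # assumed to expose InceptionRocketPlus, get_UCR_data, get_ts_dls, ts_learner
from fastai.metrics import accuracy

dsid = 'ECGFiveDays'  # illustrative dataset choice
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_var=True)
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=64)
learn = ts_learner(dls, InceptionRocketPlus, kernel_sizes=[3, 5, 7, 9], metrics=accuracy)
learn.fit_one_cycle(1, 1e-2)  # a single cycle just to show the workflow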
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\nfrom fastcore.test import *\nfrom tsai.models.utils import build_ts_model\n\n\nbs, c_in, seq_len = 8, 3, 50\nc_out = 2\nxb = torch.randn(bs, c_in, seq_len)\nmodel = build_ts_model(InceptionRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\nmodel = build_ts_model(InceptionRocketPlus, c_in=c_in, c_out=c_out, seq_len=seq_len, add_lsaz=True)\ntest_eq(model.to(xb.device)(xb).shape, (bs, c_out))\n\n\nX = np.random.rand(8, 10, 100)\ny = np.random.rand(8, 1, 100)\nsplits = TimeSplitter(show_plot=False)(y)\ntfms = [None, TSRegression()]\nbatch_tfms = TSStandardize(by_sample=True)\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\ncustom_head = partial(rocket_nd_head, d=dls.d)\nmodel = InceptionRocketPlus(dls.vars, dls.c, dls.len, custom_head=custom_head)\nxb,yb = dls.one_batch()\ntest_eq(model.to(xb.device)(xb).shape[1:], y.shape[1:])\n\n\nX = np.random.rand(16, 10, 100)\ny = np.random.randint(0, 4, (16, 1, 100))\nsplits = TimeSplitter(show_plot=False)(y)\ntfms = [None, TSClassification()]\nbatch_tfms = TSStandardize(by_sample=True)\ndls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)\ncustom_head = partial(rocket_nd_head, d=dls.d)\nmodel = MiniRocketPlus(dls.vars, dls.c, dls.len, custom_head=custom_head)\nxb,yb = dls.one_batch()\ntest_eq(model.to(xb.device)(xb).shape[1:], y.shape[1:]+(4,))", + "crumbs": [ + "Models", + "ROCKETs", + "MINIROCKETPlus Pytorch" + ] + }, + { + "objectID": "data.transforms.html", + "href": "data.transforms.html", + "title": "Time Series Data Augmentation", + "section": "", + "text": "Functions used to transform TSTensors (Data Augmentation)\n\n\nfrom tsai.data.core import TSCategorize\nfrom tsai.data.external import get_UCR_data\nfrom tsai.data.preprocessing import TSStandardize\n\n\ndsid = 'NATOPS'\nX, y, 
splits = get_UCR_data(dsid, return_split=False)\ntfms = [None, TSCategorize()]\nbatch_tfms = TSStandardize()\ndls = get_ts_dls(X, y, tfms=tfms, splits=splits, batch_tfms=batch_tfms, bs=128)\nxb, yb = next(iter(dls.train))\n\n\nsource\n\nTSIdentity\n\n TSIdentity (magnitude=None, **kwargs)\n\nApplies the identity tfm to a TSTensor batch\n\ntest_eq(TSIdentity()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSShuffle_HLs\n\n TSShuffle_HLs (magnitude=1.0, ex=None, **kwargs)\n\nRandomly shuffles HIs/LOs of an OHLC TSTensor batch\n\ntest_eq(TSShuffle_HLs()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSShuffleSteps\n\n TSShuffleSteps (magnitude=1.0, ex=None, **kwargs)\n\nRandomly shuffles consecutive sequence datapoints in batch\n\nt = TSTensor(torch.arange(11).float())\ntt_ = []\nfor _ in range(1000):\n tt = TSShuffleSteps()(t, split_idx=0)\n test_eq(len(set(tt.tolist())), len(t))\n test_ne(tt, t)\n tt_.extend([t for i,t in enumerate(tt) if t!=i])\nx, y = np.unique(tt_, return_counts=True) # This is to visualize distribution which should be equal for all and half for first and last items\nplt.bar(x, y);\n\n\n\n\n\n\n\n\n\nsource\n\n\nTSGaussianNoise\n\n TSGaussianNoise (magnitude=0.5, additive=True, ex=None, **kwargs)\n\nApplies additive or multiplicative gaussian noise\n\ntest_eq(TSGaussianNoise(.1, additive=True)(xb, split_idx=0).shape, xb.shape)\ntest_eq(TSGaussianNoise(.1, additive=False)(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSMagMulNoise\n\n TSMagMulNoise (magnitude=1, ex=None, **kwargs)\n\nApplies multiplicative noise on the y-axis for each step of a TSTensor batch\n\nsource\n\n\nTSMagAddNoise\n\n TSMagAddNoise (magnitude=1, ex=None, **kwargs)\n\nApplies additive noise on the y-axis for each step of a TSTensor batch\n\ntest_eq(TSMagAddNoise()(xb, split_idx=0).shape, xb.shape)\ntest_eq(TSMagMulNoise()(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSMagAddNoise()(xb, split_idx=0), xb)\ntest_ne(TSMagMulNoise()(xb, split_idx=0), xb)\n\n\nsource\n\n\nrandom_cum_linear_generator\n\n random_cum_linear_generator (o, magnitude=0.1)\n\n\nsource\n\n\nrandom_cum_noise_generator\n\n random_cum_noise_generator (o, magnitude=0.1, noise=None)\n\n\nsource\n\n\nrandom_cum_curve_generator\n\n random_cum_curve_generator (o, magnitude=0.1, order=4, noise=None)\n\n\nsource\n\n\nrandom_curve_generator\n\n random_curve_generator (o, magnitude=0.1, order=4, noise=None)\n\n\nsource\n\n\nTSTimeNoise\n\n TSTimeNoise (magnitude=0.1, ex=None, **kwargs)\n\nApplies noise to each step in the x-axis of a TSTensor batch based on smooth random curve\n\ntest_eq(TSTimeNoise()(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSTimeNoise()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSMagWarp\n\n TSMagWarp (magnitude=0.02, ord=4, ex=None, **kwargs)\n\nApplies warping to the y-axis of a TSTensor batch based on a smooth random curve\n\ntest_eq(TSMagWarp()(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSMagWarp()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSTimeWarp\n\n TSTimeWarp (magnitude=0.1, ord=6, ex=None, **kwargs)\n\nApplies time warping to the x-axis of a TSTensor batch based on a smooth random curve\n\ntest_eq(TSTimeWarp()(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSTimeWarp()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSWindowWarp\n\n TSWindowWarp (magnitude=0.1, ex=None, **kwargs)\n\nApplies window slicing to the x-axis of a TSTensor batch based on a random linear curve based on https://halshs.archives-ouvertes.fr/halshs-01357973/document\n\ntest_eq(TSWindowWarp()(xb, split_idx=0).shape, 
xb.shape)\n\n\nsource\n\n\nTSMagScalePerVar\n\n TSMagScalePerVar (magnitude=0.5, ex=None, **kwargs)\n\nApplies per_var scaling to the y-axis of a TSTensor batch based on a scalar\n\nsource\n\n\nTSMagScale\n\n TSMagScale (magnitude=0.5, ex=None, **kwargs)\n\nApplies scaling to the y-axis of a TSTensor batch based on a scalar\n\ntest_eq(TSMagScale()(xb, split_idx=0).shape, xb.shape)\ntest_eq(TSMagScalePerVar()(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSMagScale()(xb, split_idx=0), xb)\ntest_ne(TSMagScalePerVar()(xb, split_idx=0), xb)\n\n\nsource\n\n\ntest_interpolate\n\n test_interpolate (mode='linear')\n\n\n# Run the test\ntest_interpolate('linear')\n\nlinear interpolation is not supported by mps. You can try a different mode\nError: The operator 'aten::upsample_linear1d.out' is not currently implemented for the MPS device. If you want this op to be added in priority during the prototype phase of this feature, please comment on https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be slower than running natively on MPS.\n\n\nFalse\n\n\n\ntest_interpolate('nearest')\n\nTrue\n\n\n\nsource\n\n\nTSRandomResizedCrop\n\n TSRandomResizedCrop (magnitude=0.1, size=None, scale=None, ex=None,\n mode='nearest', **kwargs)\n\nRandomly amplifies a sequence focusing on a random section of the steps\n\nif test_interpolate('nearest'):\n test_eq(TSRandomResizedCrop(.5)(xb, split_idx=0).shape, xb.shape)\n test_ne(TSRandomResizedCrop(size=.8, scale=(.5, 1))(xb, split_idx=0).shape, xb.shape)\n test_ne(TSRandomResizedCrop(size=20, scale=(.5, 1))(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSWindowSlicing\n\n TSWindowSlicing (magnitude=0.1, ex=None, mode='nearest', **kwargs)\n\nRandomly extracts an resize a ts slice based on https://halshs.archives-ouvertes.fr/halshs-01357973/document\n\nif test_interpolate('nearest'):\n test_eq(TSWindowSlicing()(xb, split_idx=0).shape, xb.shape)\n test_ne(TSWindowSlicing()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSRandomZoomOut\n\n TSRandomZoomOut (magnitude=0.1, ex=None, mode='nearest', **kwargs)\n\nRandomly compresses a sequence on the x-axis\n\nif test_interpolate('nearest'):\n test_eq(TSRandomZoomOut(.5)(xb, split_idx=0).shape, xb.shape)#\n\n\nsource\n\n\nTSRandomTimeScale\n\n TSRandomTimeScale (magnitude=0.1, ex=None, mode='nearest', **kwargs)\n\nRandomly amplifies/ compresses a sequence on the x-axis keeping the same length\n\nif test_interpolate('nearest'):\n test_eq(TSRandomTimeScale(.5)(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSRandomTimeStep\n\n TSRandomTimeStep (magnitude=0.02, ex=None, mode='nearest', **kwargs)\n\nCompresses a sequence on the x-axis by randomly selecting sequence steps and interpolating to previous size\n\nif test_interpolate('nearest'):\n test_eq(TSRandomTimeStep()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSResampleSteps\n\n TSResampleSteps (step_pct=1.0, same_seq_len=True, magnitude=None,\n **kwargs)\n\nTransform that randomly selects and sorts sequence steps (with replacement) maintaining the sequence length\n\ntest_eq(TSResampleSteps(step_pct=.9, same_seq_len=False)(xb, split_idx=0).shape[-1], round(.9*xb.shape[-1]))\ntest_eq(TSResampleSteps(step_pct=.9, same_seq_len=True)(xb, split_idx=0).shape[-1], xb.shape[-1])\n\n\nsource\n\n\nTSBlur\n\n TSBlur (magnitude=1.0, ex=None, filt_len=None, **kwargs)\n\nBlurs a sequence applying a filter of type [1, 0, 
1]\n\ntest_eq(TSBlur(filt_len=7)(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSBlur()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSSmooth\n\n TSSmooth (magnitude=1.0, ex=None, filt_len=None, **kwargs)\n\nSmoothens a sequence applying a filter of type [1, 5, 1]\n\ntest_eq(TSSmooth(filt_len=7)(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSSmooth()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSFreqDenoise\n\n TSFreqDenoise (magnitude=0.1, ex=None, wavelet='db4', level=2, thr=None,\n thr_mode='hard', pad_mode='per', **kwargs)\n\nDenoises a sequence applying a wavelet decomposition method\n\nsource\n\n\nmaddest\n\n maddest (d, axis=None)\n\n\ntry: import pywt\nexcept ImportError: pass\n\n\nif 'pywt' in dir():\n test_eq(TSFreqDenoise()(xb, split_idx=0).shape, xb.shape)\n test_ne(TSFreqDenoise()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSRandomFreqNoise\n\n TSRandomFreqNoise (magnitude=0.1, ex=None, wavelet='db4', level=2,\n mode='constant', **kwargs)\n\nApplys random noise using a wavelet decomposition method\n\nif 'pywt' in dir():\n test_eq(TSRandomFreqNoise()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSRandomResizedLookBack\n\n TSRandomResizedLookBack (magnitude=0.1, mode='nearest', **kwargs)\n\nSelects a random number of sequence steps starting from the end and return an output of the same shape\n\nif test_interpolate('nearest'):\n for i in range(100):\n o = TSRandomResizedLookBack()(xb, split_idx=0)\n test_eq(o.shape[-1], xb.shape[-1])\n\n\nsource\n\n\nTSRandomLookBackOut\n\n TSRandomLookBackOut (magnitude=0.1, **kwargs)\n\nSelects a random number of sequence steps starting from the end and set them to zero\n\nfor i in range(100):\n o = TSRandomLookBackOut()(xb, split_idx=0)\n test_eq(o.shape[-1], xb.shape[-1])\n\n\nsource\n\n\nTSVarOut\n\n TSVarOut (magnitude=0.05, ex=None, **kwargs)\n\nSet the value of a random number of variables to zero\n\ntest_eq(TSVarOut()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSCutOut\n\n TSCutOut (magnitude=0.05, ex=None, **kwargs)\n\nSets a random section of the sequence to zero\n\ntest_eq(TSCutOut()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSTimeStepOut\n\n TSTimeStepOut (magnitude=0.05, ex=None, **kwargs)\n\nSets random sequence steps to zero\n\ntest_eq(TSTimeStepOut()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSRandomCropPad\n\n TSRandomCropPad (magnitude=0.05, ex=None, **kwargs)\n\nCrops a section of the sequence of a random length\n\ntest_eq(TSRandomCropPad()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSMaskOut\n\n TSMaskOut (magnitude=0.1, compensate:bool=False, ex=None, **kwargs)\n\nApplies a random mask\n\ntest_eq(TSMaskOut()(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSMaskOut()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSInputDropout\n\n TSInputDropout (magnitude=0.0, ex=None, **kwargs)\n\nApplies input dropout with required_grad=False\n\ntest_eq(TSInputDropout(.1)(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSInputDropout(.1)(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSTranslateX\n\n TSTranslateX (magnitude=0.1, ex=None, **kwargs)\n\nMoves a selected sequence window a random number of steps\n\ntest_eq(TSTranslateX()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSRandomShift\n\n TSRandomShift (magnitude=0.02, ex=None, **kwargs)\n\nShifts and splits a sequence\n\ntest_eq(TSRandomShift()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSHorizontalFlip\n\n TSHorizontalFlip (magnitude=1.0, ex=None, **kwargs)\n\nFlips the sequence along the x-axis\n\ntest_eq(TSHorizontalFlip()(xb, split_idx=0).shape, 
xb.shape)\ntest_ne(TSHorizontalFlip()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSRandomTrend\n\n TSRandomTrend (magnitude=0.1, ex=None, **kwargs)\n\nRandomly rotates the sequence along the z-axis\n\ntest_eq(TSRandomTrend()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSVerticalFlip\n\n TSVerticalFlip (magnitude=1.0, ex=None, **kwargs)\n\nApplies a negative value to the time sequence\n\ntest_eq(TSVerticalFlip()(xb, split_idx=0).shape, xb.shape)\ntest_ne(TSVerticalFlip()(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSResize\n\n TSResize (magnitude=-0.5, size=None, ex=None, mode='nearest', **kwargs)\n\nResizes the sequence length of a time series\n\nif test_interpolate('nearest'):\n for sz in np.linspace(.2, 2, 10): test_eq(TSResize(sz)(xb, split_idx=0).shape[-1], int(round(xb.shape[-1]*(1+sz))))\n test_ne(TSResize(1)(xb, split_idx=0), xb)\n\n\nsource\n\n\nTSRandomSize\n\n TSRandomSize (magnitude=0.1, ex=None, mode='nearest', **kwargs)\n\nRandomly resizes the sequence length of a time series\n\nif test_interpolate('nearest'):\n seq_len_ = []\n for i in range(100):\n o = TSRandomSize(.5)(xb, split_idx=0)\n seq_len_.append(o.shape[-1])\n test_lt(min(seq_len_), xb.shape[-1])\n test_gt(max(seq_len_), xb.shape[-1])\n\n\nsource\n\n\nTSRandomLowRes\n\n TSRandomLowRes (magnitude=0.5, ex=None, mode='nearest', **kwargs)\n\nRandomly resizes the sequence length of a time series to a lower resolution\n\nsource\n\n\nTSDownUpScale\n\n TSDownUpScale (magnitude=0.5, ex=None, mode='nearest', **kwargs)\n\nDownscales a time series and upscales it again to previous sequence length\n\nif test_interpolate('nearest'):\n test_eq(TSDownUpScale()(xb, split_idx=0).shape, xb.shape)\n\n\nsource\n\n\nTSRandomDownUpScale\n\n TSRandomDownUpScale (magnitude=0.5, ex=None, mode='nearest', **kwargs)\n\nRandomly downscales a time series and upscales it again to previous sequence length\n\nif test_interpolate('nearest'):\n test_eq(TSRandomDownUpScale()(xb, split_idx=0).shape, xb.shape)\n test_ne(TSDownUpScale()(xb, split_idx=0), xb)\n test_eq(TSDownUpScale()(xb, split_idx=1), xb)\n\n\nsource\n\n\nTSRandomConv\n\n TSRandomConv (magnitude=0.05, ex=None, ks=[1, 3, 5, 7], **kwargs)\n\nApplies a convolution with a random kernel and random weights with required_grad=False\n\nfor i in range(5):\n o = TSRandomConv(magnitude=0.05, ex=None, ks=[1, 3, 5, 7])(xb, split_idx=0)\n test_eq(o.shape, xb.shape)\n\n\nsource\n\n\nTSRandom2Value\n\n TSRandom2Value (magnitude=0.1, sel_vars=None, sel_steps=None,\n static=False, value=nan, **kwargs)\n\nRandomly sets selected variables of type TSTensor to predefined value (default: np.nan)\n\nt = TSTensor(torch.ones(2, 3, 10))\nTSRandom2Value(magnitude=0.5, sel_vars=None, sel_steps=None, static=False, value=0)(t, split_idx=0).data\n\ntensor([[[0., 0., 1., 0., 1., 1., 0., 1., 1., 0.],\n [1., 1., 0., 1., 1., 1., 1., 1., 1., 0.],\n [1., 1., 1., 1., 1., 0., 0., 1., 1., 1.]],\n\n [[1., 1., 1., 1., 1., 0., 1., 1., 0., 1.],\n [0., 0., 0., 0., 0., 1., 0., 1., 0., 1.],\n [0., 1., 0., 1., 0., 0., 0., 1., 0., 0.]]])\n\n\n\nt = TSTensor(torch.ones(2, 3, 10))\nTSRandom2Value(magnitude=0.5, sel_vars=[1], sel_steps=slice(-5, None), static=False, value=0)(t, split_idx=0).data\n\ntensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 0., 1., 0., 0., 0.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],\n\n [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 0., 1., 0., 0., 0.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]])\n\n\n\nt = TSTensor(torch.ones(2, 3, 10))\nTSRandom2Value(magnitude=0.5, 
sel_vars=[1], sel_steps=None, static=True, value=0)(t, split_idx=0).data\n\ntensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],\n\n [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]])\n\n\n\nt = TSTensor(torch.ones(2, 3, 10))\nTSRandom2Value(magnitude=1, sel_vars=1, sel_steps=None, static=False, value=0)(t, split_idx=0).data\n\ntensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],\n\n [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]])\n\n\n\nt = TSTensor(torch.ones(2, 3, 10))\nTSRandom2Value(magnitude=1, sel_vars=[1,2], sel_steps=None, static=False, value=0)(t, split_idx=0).data\n\ntensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]],\n\n [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]])\n\n\n\nt = TSTensor(torch.ones(2, 3, 10))\nTSRandom2Value(magnitude=1, sel_vars=1, sel_steps=[1,3,5], static=False, value=0)(t, split_idx=0).data\n\ntensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],\n\n [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]])\n\n\n\nt = TSTensor(torch.ones(2, 3, 10))\nTSRandom2Value(magnitude=1, sel_vars=[1,2], sel_steps=[1,3,5], static=False, value=0)(t, split_idx=0).data\n\ntensor([[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.],\n [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.]],\n\n [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.],\n [1., 0., 1., 0., 1., 0., 1., 1., 1., 1.]]])\n\n\n\nt = TSTensor(torch.ones(2,3,4))\nTSRandom2Value(magnitude=.5, sel_vars=[0,2])(t, split_idx=0).data\n\ntensor([[[1., nan, nan, 1.],\n [1., 1., 1., 1.],\n [1., nan, 1., 1.]],\n\n [[nan, 1., 1., nan],\n [1., 1., 1., 1.],\n [nan, nan, 1., 1.]]])\n\n\n\nt = TSTensor(torch.ones(2,3,4))\nTSRandom2Value(magnitude=.5, sel_steps=slice(2, None))(t, split_idx=0).data\n\ntensor([[[1., 1., 1., nan],\n [1., 1., nan, 1.],\n [1., 1., nan, nan]],\n\n [[1., 1., nan, 1.],\n [1., 1., nan, nan],\n [1., 1., nan, 1.]]])\n\n\n\nt = TSTensor(torch.ones(2,3,100))\ntest_gt(np.isnan(TSRandom2Value(magnitude=.5)(t, split_idx=0)).sum().item(), 0)\nt = TSTensor(torch.ones(2,3,100))\ntest_gt(np.isnan(TSRandom2Value(magnitude=.5, sel_vars=[0,2])(t, split_idx=0)[:, [0,2]]).sum().item(), 0)\nt = TSTensor(torch.ones(2,3,100))\ntest_eq(np.isnan(TSRandom2Value(magnitude=.5, sel_vars=[0,2])(t, split_idx=0)[:, 1]).sum().item(), 0)\n\n\nsource\n\n\nTSMask2Value\n\n TSMask2Value (mask_fn, value=nan, sel_vars=None, **kwargs)\n\nRandomly sets selected variables of type TSTensor to predefined value (default: np.nan)\n\nt = TSTensor(torch.ones(2,3,100))\ndef _mask_fn(o, r=.15, value=np.nan):\n return torch.rand_like(o) > (1-r)\ntest_gt(np.isnan(TSMask2Value(_mask_fn)(t, split_idx=0)).sum().item(), 0)\n\n\nsource\n\n\nTSSelfDropout\n\n TSSelfDropout (p:float=1.0, nm:str=None, before_call:callable=None,\n **kwargs)\n\nApplies dropout to a tensor with nan values by rotating axis=0 
inplace\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\np\nfloat\n1.0\nProbability of applying Transform\n\n\nnm\nstr\nNone\n\n\n\nbefore_call\ncallable\nNone\nOptional batchwise preprocessing function\n\n\nkwargs\n\n\n\n\n\n\n\nsource\n\n\nself_mask\n\n self_mask (o)\n\n\nt = TSTensor(torch.ones(2,3,100))\nmask = torch.rand_like(t) > .7\nt[mask] = np.nan\nnan_perc = np.isnan(t).float().mean().item()\nt2 = TSSelfDropout()(t, split_idx=0)\ntest_gt(torch.isnan(t2).float().mean().item(), nan_perc)\nnan_perc, torch.isnan(t2).float().mean().item()\n\n(0.30000001192092896, 0.49000000953674316)\n\n\n\nsource\n\n\nRandAugment\n\n RandAugment (tfms:list, N:int=1, M:int=3, **kwargs)\n\nA transform that before_call its state at each __call__\n\ntest_ne(RandAugment(TSMagAddNoise, N=5, M=10)(xb, split_idx=0), xb)\n\n\nsource\n\n\nTestTfm\n\n TestTfm (tfm, magnitude=1.0, ex=None, **kwargs)\n\nUtility class to test the output of selected tfms during training\n\nsource\n\n\nget_tfm_name\n\n get_tfm_name (tfm)\n\n\ntest_eq(get_tfm_name(partial(TSMagScale()))==get_tfm_name((partial(TSMagScale()), 0.1, .05))==get_tfm_name(TSMagScale())==get_tfm_name((TSMagScale(), 0.1, .05)), True)\n\n\nall_TS_randaugs_names = [get_tfm_name(t) for t in all_TS_randaugs]", + "crumbs": [ + "Data", + "Time Series Data Augmentation" + ] + }, + { + "objectID": "models.rnnattentionplus.html", + "href": "models.rnnattentionplus.html", + "title": "RNNAttentionPlus", + "section": "", + "text": "This is an custom PyTorch implementation by @yangtzech, based on TST implementation of Ignacio Oguiza." + }, + { + "objectID": "models.rnnattentionplus.html#arguments", + "href": "models.rnnattentionplus.html#arguments", + "title": "RNNAttentionPlus", + "section": "Arguments", + "text": "Arguments\nUsual values are the ones that appear in the “Attention is all you need” and “A Transformer-based Framework for Multivariate Time Series Representation Learning” papers. And some parameters are necessary for the RNN part.\nThe default values are the ones selected as a default configuration in the latter.\n\nc_in: the number of features (aka variables, dimensions, channels) in the time series dataset. dls.var\nc_out: the number of target classes. dls.c\nseq_len: number of time steps in the time series. dls.len\nhidden_size: the number of features in the hidden state in the RNN model. Default: 128.\nrnn_layers: the number of recurrent layers of the RNN model. Default: 1.\nbias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True\nrnn_dropout: If non-zero, introduces a Dropout layer on the outputs of each RNN layer except the last layer, with dropout probability equal to :attr:rnn_dropout. Default: 0\nbidirectional: If True, becomes a bidirectional RNN. Default: False\nn_heads: parallel attention heads. Usual values: 8-16. Default: 16.\nd_k: size of the learned linear projection of queries and keys in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.\nd_v: size of the learned linear projection of values in the MHA. Usual values: 16-512. Default: None -> (d_model/n_heads) = 32.\nd_ff: the dimension of the feedforward network model. Usual values: 256-4096. Default: 256.\nencoder_dropout: amount of residual dropout applied in the encoder. Usual values: 0.-0.3. Default: 0.1.\nact: the activation function of intermediate layer, relu or gelu. Default: ‘gelu’.\nencoder_layers: the number of sub-encoder-layers in the encoder. Usual values: 2-8. 
Default: 3.\nfc_dropout: dropout applied to the final fully connected layer. Usual values: 0.-0.8. Default: 0.\ny_range: range of possible y values (used in regression tasks). Default: None" + }, + { + "objectID": "models.rnnattentionplus.html#imports", + "href": "models.rnnattentionplus.html#imports", + "title": "RNNAttentionPlus", + "section": "Imports", + "text": "Imports" + }, + { + "objectID": "models.rnnattentionplus.html#rnnattentionplus", + "href": "models.rnnattentionplus.html#rnnattentionplus", + "title": "RNNAttentionPlus", + "section": "RNNAttentionPlus", + "text": "RNNAttentionPlus\n\nt = torch.rand(16, 50, 128)\noutput, attn = _MultiHeadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t)\noutput.shape, attn.shape\n\n(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))\n\n\n\nt = torch.rand(16, 50, 128)\noutput = _TSTEncoderLayer(q_len=50, d_model=128, n_heads=3, d_k=None, d_v=None, d_ff=512, dropout=0.1, activation='gelu')(t)\noutput.shape\n\ntorch.Size([16, 50, 128])\n\n\n\nsource\n\nGRUAttentionPlus\n\n GRUAttentionPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,\n hidden_size:int=128, rnn_layers:int=1, bias:bool=True,\n rnn_dropout:float=0, bidirectional=False,\n encoder_layers:int=3, n_heads:int=16,\n d_k:Optional[int]=None, d_v:Optional[int]=None,\n d_ff:int=256, encoder_dropout:float=0.1,\n act:str='gelu', fc_dropout:float=0.0,\n y_range:Optional[tuple]=None, custom_head=None,\n use_bn:bool=True, flatten:bool=True)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. 
This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nthe number of features (aka variables, dimensions, channels) in the time series dataset.\n\n\nc_out\nint\n\nthe number of target classes.\n\n\nseq_len\nint\n\nnumber of time steps in the time series.\n\n\nd\ntuple\nNone\noutput shape (excluding batch dimension).\n\n\nhidden_size\nint\n128\nthe number of features in the hidden state h\n\n\nrnn_layers\nint\n1\nthe number of recurrent layers of the RNN model.\n\n\nbias\nbool\nTrue\nIf False, then the layer does not use bias weights b_ih and b_hh.\n\n\nrnn_dropout\nfloat\n0\nrnn dropout applied to the output of each RNN layer except the last layer.\n\n\nbidirectional\nbool\nFalse\nIf True, becomes a bidirectional RNN. Default: False\n\n\nencoder_layers\nint\n3\nthe number of sub-encoder-layers in the encoder.\n\n\nn_heads\nint\n16\nparallel attention heads.\n\n\nd_k\ntyping.Optional[int]\nNone\nsize of the learned linear projection of queries and keys in the MHA.\n\n\nd_v\ntyping.Optional[int]\nNone\nsize of the learned linear projection of values in the MHA.\n\n\nd_ff\nint\n256\nthe dimension of the feedforward network model.\n\n\nencoder_dropout\nfloat\n0.1\namount of residual dropout applied in the encoder.\n\n\nact\nstr\ngelu\nthe activation function of intermediate layer, relu or gelu.\n\n\nfc_dropout\nfloat\n0.0\ndropout applied to the final fully connected layer.\n\n\ny_range\ntyping.Optional[tuple]\nNone\nrange of possible y values (used in regression tasks).\n\n\ncustom_head\nNoneType\nNone\ncustom head that will be applied to the model head (optional).\n\n\nuse_bn\nbool\nTrue\nindicates if batchnorm will be applied to the model head.\n\n\nflatten\nbool\nTrue\nthis will flatten the output of the encoder before applying the head if True.\n\n\n\n\nsource\n\n\nLSTMAttentionPlus\n\n LSTMAttentionPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,\n hidden_size:int=128, rnn_layers:int=1, bias:bool=True,\n rnn_dropout:float=0, bidirectional=False,\n encoder_layers:int=3, n_heads:int=16,\n d_k:Optional[int]=None, d_v:Optional[int]=None,\n d_ff:int=256, encoder_dropout:float=0.1,\n act:str='gelu', fc_dropout:float=0.0,\n y_range:Optional[tuple]=None, custom_head=None,\n use_bn:bool=True, flatten:bool=True)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? 
A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nthe number of features (aka variables, dimensions, channels) in the time series dataset.\n\n\nc_out\nint\n\nthe number of target classes.\n\n\nseq_len\nint\n\nnumber of time steps in the time series.\n\n\nd\ntuple\nNone\noutput shape (excluding batch dimension).\n\n\nhidden_size\nint\n128\nthe number of features in the hidden state h\n\n\nrnn_layers\nint\n1\nthe number of recurrent layers of the RNN model.\n\n\nbias\nbool\nTrue\nIf False, then the layer does not use bias weights b_ih and b_hh.\n\n\nrnn_dropout\nfloat\n0\nrnn dropout applied to the output of each RNN layer except the last layer.\n\n\nbidirectional\nbool\nFalse\nIf True, becomes a bidirectional RNN. Default: False\n\n\nencoder_layers\nint\n3\nthe number of sub-encoder-layers in the encoder.\n\n\nn_heads\nint\n16\nparallel attention heads.\n\n\nd_k\ntyping.Optional[int]\nNone\nsize of the learned linear projection of queries and keys in the MHA.\n\n\nd_v\ntyping.Optional[int]\nNone\nsize of the learned linear projection of values in the MHA.\n\n\nd_ff\nint\n256\nthe dimension of the feedforward network model.\n\n\nencoder_dropout\nfloat\n0.1\namount of residual dropout applied in the encoder.\n\n\nact\nstr\ngelu\nthe activation function of intermediate layer, relu or gelu.\n\n\nfc_dropout\nfloat\n0.0\ndropout applied to the final fully connected layer.\n\n\ny_range\ntyping.Optional[tuple]\nNone\nrange of possible y values (used in regression tasks).\n\n\ncustom_head\nNoneType\nNone\ncustom head that will be applied to the model head (optional).\n\n\nuse_bn\nbool\nTrue\nindicates if batchnorm will be applied to the model head.\n\n\nflatten\nbool\nTrue\nthis will flatten the output of the encoder before applying the head if True.\n\n\n\n\nsource\n\n\nRNNAttentionPlus\n\n RNNAttentionPlus (c_in:int, c_out:int, seq_len:int, d:tuple=None,\n hidden_size:int=128, rnn_layers:int=1, bias:bool=True,\n rnn_dropout:float=0, bidirectional=False,\n encoder_layers:int=3, n_heads:int=16,\n d_k:Optional[int]=None, d_v:Optional[int]=None,\n d_ff:int=256, encoder_dropout:float=0.1,\n act:str='gelu', fc_dropout:float=0.0,\n y_range:Optional[tuple]=None, custom_head=None,\n use_bn:bool=True, flatten:bool=True)\n\nA sequential container.\nModules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) accepts any input and forwards it to the first module it contains. 
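Since GRUAttentionPlus, LSTMAttentionPlus and RNNAttentionPlus share the same constructor arguments, they can be swapped freely when comparing architectures. The sketch below is not from the original notebook; the imports are assumed to be available via from tsai.all import *, and it simply mirrors the shape checks shown further down:

import torch
from tsai.all import *  # assumed to expose the three attention-RNN variants
from tsai.models.utils import build_ts_model

bs, c_in, seq_len, c_out = 16, 3, 40, 2
xb = torch.randn(bs, c_in, seq_len)
for arch in (RNNAttentionPlus, LSTMAttentionPlus, GRUAttentionPlus):
    # identical kwargs work for all three variants
    model = build_ts_model(arch, c_in=c_in, c_out=c_out, seq_len=seq_len, hidden_size=64, encoder_layers=2)
    assert model.to(xb.device)(xb).shape == (bs, c_out)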
It then “chains” outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.\nThe value a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) applies to each of the modules it stores (which are each a registered submodule of the [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential)).\nWhat’s the difference between a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like–a list for storing Module s! On the other hand, the layers in a [Sequential](https://timeseriesAI.github.io/models.layers.html#sequential) are connected in a cascading way.\nExample::\n# Using Sequential to create a small model. When `model` is run,\n# input will first be passed to `Conv2d(1,20,5)`. The output of\n# `Conv2d(1,20,5)` will be used as the input to the first\n# `ReLU`; the output of the first `ReLU` will become the input\n# for `Conv2d(20,64,5)`. Finally, the output of\n# `Conv2d(20,64,5)` will be used as input to the second `ReLU`\nmodel = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n# Using Sequential with OrderedDict. This is functionally the\n# same as the above code\nmodel = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n\n\n\n\n\n\n\n\n\n\nType\nDefault\nDetails\n\n\n\n\nc_in\nint\n\nthe number of features (aka variables, dimensions, channels) in the time series dataset.\n\n\nc_out\nint\n\nthe number of target classes.\n\n\nseq_len\nint\n\nnumber of time steps in the time series.\n\n\nd\ntuple\nNone\noutput shape (excluding batch dimension).\n\n\nhidden_size\nint\n128\nthe number of features in the hidden state h\n\n\nrnn_layers\nint\n1\nthe number of recurrent layers of the RNN model.\n\n\nbias\nbool\nTrue\nIf False, then the layer does not use bias weights b_ih and b_hh.\n\n\nrnn_dropout\nfloat\n0\nrnn dropout applied to the output of each RNN layer except the last layer.\n\n\nbidirectional\nbool\nFalse\nIf True, becomes a bidirectional RNN. 
Default: False\n\n\nencoder_layers\nint\n3\nthe number of sub-encoder-layers in the encoder.\n\n\nn_heads\nint\n16\nparallel attention heads.\n\n\nd_k\ntyping.Optional[int]\nNone\nsize of the learned linear projection of queries and keys in the MHA.\n\n\nd_v\ntyping.Optional[int]\nNone\nsize of the learned linear projection of values in the MHA.\n\n\nd_ff\nint\n256\nthe dimension of the feedforward network model.\n\n\nencoder_dropout\nfloat\n0.1\namount of residual dropout applied in the encoder.\n\n\nact\nstr\ngelu\nthe activation function of intermediate layer, relu or gelu.\n\n\nfc_dropout\nfloat\n0.0\ndropout applied to the final fully connected layer.\n\n\ny_range\ntyping.Optional[tuple]\nNone\nrange of possible y values (used in regression tasks).\n\n\ncustom_head\nNoneType\nNone\ncustom head that will be applied to the model head (optional).\n\n\nuse_bn\nbool\nTrue\nindicates if batchnorm will be applied to the model head.\n\n\nflatten\nbool\nTrue\nthis will flatten the output of the encoder before applying the head if True.\n\n\n\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 500\n\nxb = torch.randn(bs, c_in, seq_len)\n\n# standardize by channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\n# Settings\nhidden_size = 128\nrnn_layers=1\nbias=True\nrnn_dropout=0\nbidirectional=False\nencoder_layers=3\nn_heads = 16\nd_k = d_v = None # if None --> d_model // n_heads\nd_ff = 256\nencoder_dropout = 0.1\nact = \"gelu\"\nfc_dropout = 0.1\nkwargs = {}\n\nmodel = RNNAttentionPlus(c_in, c_out, seq_len, hidden_size=hidden_size, rnn_layers=rnn_layers, bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional,\n encoder_layers=encoder_layers, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, encoder_dropout=encoder_dropout, act=act, \n fc_dropout=fc_dropout, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 541698\n\n\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 60\n\nxb = torch.randn(bs, c_in, seq_len)\n\n# standardize by channel by_var based on the training set\nxb = (xb - xb.mean((0, 2), keepdim=True)) / xb.std((0, 2), keepdim=True)\n\n# Settings\nhidden_size = 128\nrnn_layers=1\nbias=True\nrnn_dropout=0\nbidirectional=False\nencoder_layers=3\nn_heads = 16\nd_k = d_v = None # if None --> d_model // n_heads\nd_ff = 256\nencoder_dropout = 0.1\nact = \"gelu\"\nfc_dropout = 0.1\nkwargs = {}\n# kwargs = dict(kernel_size=5, padding=2)\n\nmodel = RNNAttentionPlus(c_in, c_out, seq_len, hidden_size=hidden_size, rnn_layers=rnn_layers, bias=bias, rnn_dropout=rnn_dropout, bidirectional=bidirectional,\n encoder_layers=encoder_layers, n_heads=n_heads,\n d_k=d_k, d_v=d_v, d_ff=d_ff, encoder_dropout=encoder_dropout, act=act, \n fc_dropout=fc_dropout, **kwargs)\ntest_eq(model.to(xb.device)(xb).shape, [bs, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 429058\n\n\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 60\nd = 10\n\nxb = torch.randn(bs, c_in, seq_len)\nmodel = RNNAttentionPlus(c_in, c_out, seq_len, d=d)\ntest_eq(model.to(xb.device)(xb).shape, [bs, d, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 567572\n\n\n\nbs = 32\nc_in = 9 # aka channels, features, variables, dimensions\nc_out = 2\nseq_len = 60\nd = (3, 10)\n\nxb = torch.randn(bs, c_in, 
seq_len)\nmodel = RNNAttentionPlus(c_in, c_out, seq_len, d=d)\ntest_eq(model.to(xb.device)(xb).shape, [bs, *d, c_out])\nprint(f'model parameters: {count_parameters(model)}')\n\nmodel parameters: 874812" + } +] \ No newline at end of file diff --git a/site_libs/bootstrap/bootstrap-icons.css b/site_libs/bootstrap/bootstrap-icons.css new file mode 100644 index 000000000..285e4448f --- /dev/null +++ b/site_libs/bootstrap/bootstrap-icons.css @@ -0,0 +1,2078 @@ +/*! + * Bootstrap Icons v1.11.1 (https://icons.getbootstrap.com/) + * Copyright 2019-2023 The Bootstrap Authors + * Licensed under MIT (https://github.com/twbs/icons/blob/main/LICENSE) + */ + +@font-face { + font-display: block; + font-family: "bootstrap-icons"; + src: +url("./bootstrap-icons.woff?2820a3852bdb9a5832199cc61cec4e65") format("woff"); +} + +.bi::before, +[class^="bi-"]::before, +[class*=" bi-"]::before { + display: inline-block; + font-family: bootstrap-icons !important; + font-style: normal; + font-weight: normal !important; + font-variant: normal; + text-transform: none; + line-height: 1; + vertical-align: -.125em; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +.bi-123::before { content: "\f67f"; } +.bi-alarm-fill::before { content: "\f101"; } +.bi-alarm::before { content: "\f102"; } +.bi-align-bottom::before { content: "\f103"; } +.bi-align-center::before { content: "\f104"; } +.bi-align-end::before { content: "\f105"; } +.bi-align-middle::before { content: "\f106"; } +.bi-align-start::before { content: "\f107"; } +.bi-align-top::before { content: "\f108"; } +.bi-alt::before { content: "\f109"; } +.bi-app-indicator::before { content: "\f10a"; } +.bi-app::before { content: "\f10b"; } +.bi-archive-fill::before { content: "\f10c"; } +.bi-archive::before { content: "\f10d"; } +.bi-arrow-90deg-down::before { content: "\f10e"; } +.bi-arrow-90deg-left::before { content: "\f10f"; } +.bi-arrow-90deg-right::before { content: "\f110"; } +.bi-arrow-90deg-up::before { content: "\f111"; } +.bi-arrow-bar-down::before { content: "\f112"; } +.bi-arrow-bar-left::before { content: "\f113"; } +.bi-arrow-bar-right::before { content: "\f114"; } +.bi-arrow-bar-up::before { content: "\f115"; } +.bi-arrow-clockwise::before { content: "\f116"; } +.bi-arrow-counterclockwise::before { content: "\f117"; } +.bi-arrow-down-circle-fill::before { content: "\f118"; } +.bi-arrow-down-circle::before { content: "\f119"; } +.bi-arrow-down-left-circle-fill::before { content: "\f11a"; } +.bi-arrow-down-left-circle::before { content: "\f11b"; } +.bi-arrow-down-left-square-fill::before { content: "\f11c"; } +.bi-arrow-down-left-square::before { content: "\f11d"; } +.bi-arrow-down-left::before { content: "\f11e"; } +.bi-arrow-down-right-circle-fill::before { content: "\f11f"; } +.bi-arrow-down-right-circle::before { content: "\f120"; } +.bi-arrow-down-right-square-fill::before { content: "\f121"; } +.bi-arrow-down-right-square::before { content: "\f122"; } +.bi-arrow-down-right::before { content: "\f123"; } +.bi-arrow-down-short::before { content: "\f124"; } +.bi-arrow-down-square-fill::before { content: "\f125"; } +.bi-arrow-down-square::before { content: "\f126"; } +.bi-arrow-down-up::before { content: "\f127"; } +.bi-arrow-down::before { content: "\f128"; } +.bi-arrow-left-circle-fill::before { content: "\f129"; } +.bi-arrow-left-circle::before { content: "\f12a"; } +.bi-arrow-left-right::before { content: "\f12b"; } +.bi-arrow-left-short::before { content: "\f12c"; } +.bi-arrow-left-square-fill::before { content: "\f12d"; } 
+.bi-arrow-left-square::before { content: "\f12e"; } +.bi-arrow-left::before { content: "\f12f"; } +.bi-arrow-repeat::before { content: "\f130"; } +.bi-arrow-return-left::before { content: "\f131"; } +.bi-arrow-return-right::before { content: "\f132"; } +.bi-arrow-right-circle-fill::before { content: "\f133"; } +.bi-arrow-right-circle::before { content: "\f134"; } +.bi-arrow-right-short::before { content: "\f135"; } +.bi-arrow-right-square-fill::before { content: "\f136"; } +.bi-arrow-right-square::before { content: "\f137"; } +.bi-arrow-right::before { content: "\f138"; } +.bi-arrow-up-circle-fill::before { content: "\f139"; } +.bi-arrow-up-circle::before { content: "\f13a"; } +.bi-arrow-up-left-circle-fill::before { content: "\f13b"; } +.bi-arrow-up-left-circle::before { content: "\f13c"; } +.bi-arrow-up-left-square-fill::before { content: "\f13d"; } +.bi-arrow-up-left-square::before { content: "\f13e"; } +.bi-arrow-up-left::before { content: "\f13f"; } +.bi-arrow-up-right-circle-fill::before { content: "\f140"; } +.bi-arrow-up-right-circle::before { content: "\f141"; } +.bi-arrow-up-right-square-fill::before { content: "\f142"; } +.bi-arrow-up-right-square::before { content: "\f143"; } +.bi-arrow-up-right::before { content: "\f144"; } +.bi-arrow-up-short::before { content: "\f145"; } +.bi-arrow-up-square-fill::before { content: "\f146"; } +.bi-arrow-up-square::before { content: "\f147"; } +.bi-arrow-up::before { content: "\f148"; } +.bi-arrows-angle-contract::before { content: "\f149"; } +.bi-arrows-angle-expand::before { content: "\f14a"; } +.bi-arrows-collapse::before { content: "\f14b"; } +.bi-arrows-expand::before { content: "\f14c"; } +.bi-arrows-fullscreen::before { content: "\f14d"; } +.bi-arrows-move::before { content: "\f14e"; } +.bi-aspect-ratio-fill::before { content: "\f14f"; } +.bi-aspect-ratio::before { content: "\f150"; } +.bi-asterisk::before { content: "\f151"; } +.bi-at::before { content: "\f152"; } +.bi-award-fill::before { content: "\f153"; } +.bi-award::before { content: "\f154"; } +.bi-back::before { content: "\f155"; } +.bi-backspace-fill::before { content: "\f156"; } +.bi-backspace-reverse-fill::before { content: "\f157"; } +.bi-backspace-reverse::before { content: "\f158"; } +.bi-backspace::before { content: "\f159"; } +.bi-badge-3d-fill::before { content: "\f15a"; } +.bi-badge-3d::before { content: "\f15b"; } +.bi-badge-4k-fill::before { content: "\f15c"; } +.bi-badge-4k::before { content: "\f15d"; } +.bi-badge-8k-fill::before { content: "\f15e"; } +.bi-badge-8k::before { content: "\f15f"; } +.bi-badge-ad-fill::before { content: "\f160"; } +.bi-badge-ad::before { content: "\f161"; } +.bi-badge-ar-fill::before { content: "\f162"; } +.bi-badge-ar::before { content: "\f163"; } +.bi-badge-cc-fill::before { content: "\f164"; } +.bi-badge-cc::before { content: "\f165"; } +.bi-badge-hd-fill::before { content: "\f166"; } +.bi-badge-hd::before { content: "\f167"; } +.bi-badge-tm-fill::before { content: "\f168"; } +.bi-badge-tm::before { content: "\f169"; } +.bi-badge-vo-fill::before { content: "\f16a"; } +.bi-badge-vo::before { content: "\f16b"; } +.bi-badge-vr-fill::before { content: "\f16c"; } +.bi-badge-vr::before { content: "\f16d"; } +.bi-badge-wc-fill::before { content: "\f16e"; } +.bi-badge-wc::before { content: "\f16f"; } +.bi-bag-check-fill::before { content: "\f170"; } +.bi-bag-check::before { content: "\f171"; } +.bi-bag-dash-fill::before { content: "\f172"; } +.bi-bag-dash::before { content: "\f173"; } +.bi-bag-fill::before { content: "\f174"; } 
+.bi-bag-plus-fill::before { content: "\f175"; } +.bi-bag-plus::before { content: "\f176"; } +.bi-bag-x-fill::before { content: "\f177"; } +.bi-bag-x::before { content: "\f178"; } +.bi-bag::before { content: "\f179"; } +.bi-bar-chart-fill::before { content: "\f17a"; } +.bi-bar-chart-line-fill::before { content: "\f17b"; } +.bi-bar-chart-line::before { content: "\f17c"; } +.bi-bar-chart-steps::before { content: "\f17d"; } +.bi-bar-chart::before { content: "\f17e"; } +.bi-basket-fill::before { content: "\f17f"; } +.bi-basket::before { content: "\f180"; } +.bi-basket2-fill::before { content: "\f181"; } +.bi-basket2::before { content: "\f182"; } +.bi-basket3-fill::before { content: "\f183"; } +.bi-basket3::before { content: "\f184"; } +.bi-battery-charging::before { content: "\f185"; } +.bi-battery-full::before { content: "\f186"; } +.bi-battery-half::before { content: "\f187"; } +.bi-battery::before { content: "\f188"; } +.bi-bell-fill::before { content: "\f189"; } +.bi-bell::before { content: "\f18a"; } +.bi-bezier::before { content: "\f18b"; } +.bi-bezier2::before { content: "\f18c"; } +.bi-bicycle::before { content: "\f18d"; } +.bi-binoculars-fill::before { content: "\f18e"; } +.bi-binoculars::before { content: "\f18f"; } +.bi-blockquote-left::before { content: "\f190"; } +.bi-blockquote-right::before { content: "\f191"; } +.bi-book-fill::before { content: "\f192"; } +.bi-book-half::before { content: "\f193"; } +.bi-book::before { content: "\f194"; } +.bi-bookmark-check-fill::before { content: "\f195"; } +.bi-bookmark-check::before { content: "\f196"; } +.bi-bookmark-dash-fill::before { content: "\f197"; } +.bi-bookmark-dash::before { content: "\f198"; } +.bi-bookmark-fill::before { content: "\f199"; } +.bi-bookmark-heart-fill::before { content: "\f19a"; } +.bi-bookmark-heart::before { content: "\f19b"; } +.bi-bookmark-plus-fill::before { content: "\f19c"; } +.bi-bookmark-plus::before { content: "\f19d"; } +.bi-bookmark-star-fill::before { content: "\f19e"; } +.bi-bookmark-star::before { content: "\f19f"; } +.bi-bookmark-x-fill::before { content: "\f1a0"; } +.bi-bookmark-x::before { content: "\f1a1"; } +.bi-bookmark::before { content: "\f1a2"; } +.bi-bookmarks-fill::before { content: "\f1a3"; } +.bi-bookmarks::before { content: "\f1a4"; } +.bi-bookshelf::before { content: "\f1a5"; } +.bi-bootstrap-fill::before { content: "\f1a6"; } +.bi-bootstrap-reboot::before { content: "\f1a7"; } +.bi-bootstrap::before { content: "\f1a8"; } +.bi-border-all::before { content: "\f1a9"; } +.bi-border-bottom::before { content: "\f1aa"; } +.bi-border-center::before { content: "\f1ab"; } +.bi-border-inner::before { content: "\f1ac"; } +.bi-border-left::before { content: "\f1ad"; } +.bi-border-middle::before { content: "\f1ae"; } +.bi-border-outer::before { content: "\f1af"; } +.bi-border-right::before { content: "\f1b0"; } +.bi-border-style::before { content: "\f1b1"; } +.bi-border-top::before { content: "\f1b2"; } +.bi-border-width::before { content: "\f1b3"; } +.bi-border::before { content: "\f1b4"; } +.bi-bounding-box-circles::before { content: "\f1b5"; } +.bi-bounding-box::before { content: "\f1b6"; } +.bi-box-arrow-down-left::before { content: "\f1b7"; } +.bi-box-arrow-down-right::before { content: "\f1b8"; } +.bi-box-arrow-down::before { content: "\f1b9"; } +.bi-box-arrow-in-down-left::before { content: "\f1ba"; } +.bi-box-arrow-in-down-right::before { content: "\f1bb"; } +.bi-box-arrow-in-down::before { content: "\f1bc"; } +.bi-box-arrow-in-left::before { content: "\f1bd"; } 
+.bi-box-arrow-in-right::before { content: "\f1be"; } +.bi-box-arrow-in-up-left::before { content: "\f1bf"; } +.bi-box-arrow-in-up-right::before { content: "\f1c0"; } +.bi-box-arrow-in-up::before { content: "\f1c1"; } +.bi-box-arrow-left::before { content: "\f1c2"; } +.bi-box-arrow-right::before { content: "\f1c3"; } +.bi-box-arrow-up-left::before { content: "\f1c4"; } +.bi-box-arrow-up-right::before { content: "\f1c5"; } +.bi-box-arrow-up::before { content: "\f1c6"; } +.bi-box-seam::before { content: "\f1c7"; } +.bi-box::before { content: "\f1c8"; } +.bi-braces::before { content: "\f1c9"; } +.bi-bricks::before { content: "\f1ca"; } +.bi-briefcase-fill::before { content: "\f1cb"; } +.bi-briefcase::before { content: "\f1cc"; } +.bi-brightness-alt-high-fill::before { content: "\f1cd"; } +.bi-brightness-alt-high::before { content: "\f1ce"; } +.bi-brightness-alt-low-fill::before { content: "\f1cf"; } +.bi-brightness-alt-low::before { content: "\f1d0"; } +.bi-brightness-high-fill::before { content: "\f1d1"; } +.bi-brightness-high::before { content: "\f1d2"; } +.bi-brightness-low-fill::before { content: "\f1d3"; } +.bi-brightness-low::before { content: "\f1d4"; } +.bi-broadcast-pin::before { content: "\f1d5"; } +.bi-broadcast::before { content: "\f1d6"; } +.bi-brush-fill::before { content: "\f1d7"; } +.bi-brush::before { content: "\f1d8"; } +.bi-bucket-fill::before { content: "\f1d9"; } +.bi-bucket::before { content: "\f1da"; } +.bi-bug-fill::before { content: "\f1db"; } +.bi-bug::before { content: "\f1dc"; } +.bi-building::before { content: "\f1dd"; } +.bi-bullseye::before { content: "\f1de"; } +.bi-calculator-fill::before { content: "\f1df"; } +.bi-calculator::before { content: "\f1e0"; } +.bi-calendar-check-fill::before { content: "\f1e1"; } +.bi-calendar-check::before { content: "\f1e2"; } +.bi-calendar-date-fill::before { content: "\f1e3"; } +.bi-calendar-date::before { content: "\f1e4"; } +.bi-calendar-day-fill::before { content: "\f1e5"; } +.bi-calendar-day::before { content: "\f1e6"; } +.bi-calendar-event-fill::before { content: "\f1e7"; } +.bi-calendar-event::before { content: "\f1e8"; } +.bi-calendar-fill::before { content: "\f1e9"; } +.bi-calendar-minus-fill::before { content: "\f1ea"; } +.bi-calendar-minus::before { content: "\f1eb"; } +.bi-calendar-month-fill::before { content: "\f1ec"; } +.bi-calendar-month::before { content: "\f1ed"; } +.bi-calendar-plus-fill::before { content: "\f1ee"; } +.bi-calendar-plus::before { content: "\f1ef"; } +.bi-calendar-range-fill::before { content: "\f1f0"; } +.bi-calendar-range::before { content: "\f1f1"; } +.bi-calendar-week-fill::before { content: "\f1f2"; } +.bi-calendar-week::before { content: "\f1f3"; } +.bi-calendar-x-fill::before { content: "\f1f4"; } +.bi-calendar-x::before { content: "\f1f5"; } +.bi-calendar::before { content: "\f1f6"; } +.bi-calendar2-check-fill::before { content: "\f1f7"; } +.bi-calendar2-check::before { content: "\f1f8"; } +.bi-calendar2-date-fill::before { content: "\f1f9"; } +.bi-calendar2-date::before { content: "\f1fa"; } +.bi-calendar2-day-fill::before { content: "\f1fb"; } +.bi-calendar2-day::before { content: "\f1fc"; } +.bi-calendar2-event-fill::before { content: "\f1fd"; } +.bi-calendar2-event::before { content: "\f1fe"; } +.bi-calendar2-fill::before { content: "\f1ff"; } +.bi-calendar2-minus-fill::before { content: "\f200"; } +.bi-calendar2-minus::before { content: "\f201"; } +.bi-calendar2-month-fill::before { content: "\f202"; } +.bi-calendar2-month::before { content: "\f203"; } 
+.bi-calendar2-plus-fill::before { content: "\f204"; } +.bi-calendar2-plus::before { content: "\f205"; } +.bi-calendar2-range-fill::before { content: "\f206"; } +.bi-calendar2-range::before { content: "\f207"; } +.bi-calendar2-week-fill::before { content: "\f208"; } +.bi-calendar2-week::before { content: "\f209"; } +.bi-calendar2-x-fill::before { content: "\f20a"; } +.bi-calendar2-x::before { content: "\f20b"; } +.bi-calendar2::before { content: "\f20c"; } +.bi-calendar3-event-fill::before { content: "\f20d"; } +.bi-calendar3-event::before { content: "\f20e"; } +.bi-calendar3-fill::before { content: "\f20f"; } +.bi-calendar3-range-fill::before { content: "\f210"; } +.bi-calendar3-range::before { content: "\f211"; } +.bi-calendar3-week-fill::before { content: "\f212"; } +.bi-calendar3-week::before { content: "\f213"; } +.bi-calendar3::before { content: "\f214"; } +.bi-calendar4-event::before { content: "\f215"; } +.bi-calendar4-range::before { content: "\f216"; } +.bi-calendar4-week::before { content: "\f217"; } +.bi-calendar4::before { content: "\f218"; } +.bi-camera-fill::before { content: "\f219"; } +.bi-camera-reels-fill::before { content: "\f21a"; } +.bi-camera-reels::before { content: "\f21b"; } +.bi-camera-video-fill::before { content: "\f21c"; } +.bi-camera-video-off-fill::before { content: "\f21d"; } +.bi-camera-video-off::before { content: "\f21e"; } +.bi-camera-video::before { content: "\f21f"; } +.bi-camera::before { content: "\f220"; } +.bi-camera2::before { content: "\f221"; } +.bi-capslock-fill::before { content: "\f222"; } +.bi-capslock::before { content: "\f223"; } +.bi-card-checklist::before { content: "\f224"; } +.bi-card-heading::before { content: "\f225"; } +.bi-card-image::before { content: "\f226"; } +.bi-card-list::before { content: "\f227"; } +.bi-card-text::before { content: "\f228"; } +.bi-caret-down-fill::before { content: "\f229"; } +.bi-caret-down-square-fill::before { content: "\f22a"; } +.bi-caret-down-square::before { content: "\f22b"; } +.bi-caret-down::before { content: "\f22c"; } +.bi-caret-left-fill::before { content: "\f22d"; } +.bi-caret-left-square-fill::before { content: "\f22e"; } +.bi-caret-left-square::before { content: "\f22f"; } +.bi-caret-left::before { content: "\f230"; } +.bi-caret-right-fill::before { content: "\f231"; } +.bi-caret-right-square-fill::before { content: "\f232"; } +.bi-caret-right-square::before { content: "\f233"; } +.bi-caret-right::before { content: "\f234"; } +.bi-caret-up-fill::before { content: "\f235"; } +.bi-caret-up-square-fill::before { content: "\f236"; } +.bi-caret-up-square::before { content: "\f237"; } +.bi-caret-up::before { content: "\f238"; } +.bi-cart-check-fill::before { content: "\f239"; } +.bi-cart-check::before { content: "\f23a"; } +.bi-cart-dash-fill::before { content: "\f23b"; } +.bi-cart-dash::before { content: "\f23c"; } +.bi-cart-fill::before { content: "\f23d"; } +.bi-cart-plus-fill::before { content: "\f23e"; } +.bi-cart-plus::before { content: "\f23f"; } +.bi-cart-x-fill::before { content: "\f240"; } +.bi-cart-x::before { content: "\f241"; } +.bi-cart::before { content: "\f242"; } +.bi-cart2::before { content: "\f243"; } +.bi-cart3::before { content: "\f244"; } +.bi-cart4::before { content: "\f245"; } +.bi-cash-stack::before { content: "\f246"; } +.bi-cash::before { content: "\f247"; } +.bi-cast::before { content: "\f248"; } +.bi-chat-dots-fill::before { content: "\f249"; } +.bi-chat-dots::before { content: "\f24a"; } +.bi-chat-fill::before { content: "\f24b"; } +.bi-chat-left-dots-fill::before { 
content: "\f24c"; } +.bi-chat-left-dots::before { content: "\f24d"; } +.bi-chat-left-fill::before { content: "\f24e"; } +.bi-chat-left-quote-fill::before { content: "\f24f"; } +.bi-chat-left-quote::before { content: "\f250"; } +.bi-chat-left-text-fill::before { content: "\f251"; } +.bi-chat-left-text::before { content: "\f252"; } +.bi-chat-left::before { content: "\f253"; } +.bi-chat-quote-fill::before { content: "\f254"; } +.bi-chat-quote::before { content: "\f255"; } +.bi-chat-right-dots-fill::before { content: "\f256"; } +.bi-chat-right-dots::before { content: "\f257"; } +.bi-chat-right-fill::before { content: "\f258"; } +.bi-chat-right-quote-fill::before { content: "\f259"; } +.bi-chat-right-quote::before { content: "\f25a"; } +.bi-chat-right-text-fill::before { content: "\f25b"; } +.bi-chat-right-text::before { content: "\f25c"; } +.bi-chat-right::before { content: "\f25d"; } +.bi-chat-square-dots-fill::before { content: "\f25e"; } +.bi-chat-square-dots::before { content: "\f25f"; } +.bi-chat-square-fill::before { content: "\f260"; } +.bi-chat-square-quote-fill::before { content: "\f261"; } +.bi-chat-square-quote::before { content: "\f262"; } +.bi-chat-square-text-fill::before { content: "\f263"; } +.bi-chat-square-text::before { content: "\f264"; } +.bi-chat-square::before { content: "\f265"; } +.bi-chat-text-fill::before { content: "\f266"; } +.bi-chat-text::before { content: "\f267"; } +.bi-chat::before { content: "\f268"; } +.bi-check-all::before { content: "\f269"; } +.bi-check-circle-fill::before { content: "\f26a"; } +.bi-check-circle::before { content: "\f26b"; } +.bi-check-square-fill::before { content: "\f26c"; } +.bi-check-square::before { content: "\f26d"; } +.bi-check::before { content: "\f26e"; } +.bi-check2-all::before { content: "\f26f"; } +.bi-check2-circle::before { content: "\f270"; } +.bi-check2-square::before { content: "\f271"; } +.bi-check2::before { content: "\f272"; } +.bi-chevron-bar-contract::before { content: "\f273"; } +.bi-chevron-bar-down::before { content: "\f274"; } +.bi-chevron-bar-expand::before { content: "\f275"; } +.bi-chevron-bar-left::before { content: "\f276"; } +.bi-chevron-bar-right::before { content: "\f277"; } +.bi-chevron-bar-up::before { content: "\f278"; } +.bi-chevron-compact-down::before { content: "\f279"; } +.bi-chevron-compact-left::before { content: "\f27a"; } +.bi-chevron-compact-right::before { content: "\f27b"; } +.bi-chevron-compact-up::before { content: "\f27c"; } +.bi-chevron-contract::before { content: "\f27d"; } +.bi-chevron-double-down::before { content: "\f27e"; } +.bi-chevron-double-left::before { content: "\f27f"; } +.bi-chevron-double-right::before { content: "\f280"; } +.bi-chevron-double-up::before { content: "\f281"; } +.bi-chevron-down::before { content: "\f282"; } +.bi-chevron-expand::before { content: "\f283"; } +.bi-chevron-left::before { content: "\f284"; } +.bi-chevron-right::before { content: "\f285"; } +.bi-chevron-up::before { content: "\f286"; } +.bi-circle-fill::before { content: "\f287"; } +.bi-circle-half::before { content: "\f288"; } +.bi-circle-square::before { content: "\f289"; } +.bi-circle::before { content: "\f28a"; } +.bi-clipboard-check::before { content: "\f28b"; } +.bi-clipboard-data::before { content: "\f28c"; } +.bi-clipboard-minus::before { content: "\f28d"; } +.bi-clipboard-plus::before { content: "\f28e"; } +.bi-clipboard-x::before { content: "\f28f"; } +.bi-clipboard::before { content: "\f290"; } +.bi-clock-fill::before { content: "\f291"; } +.bi-clock-history::before { content: "\f292"; 
} +.bi-clock::before { content: "\f293"; } +.bi-cloud-arrow-down-fill::before { content: "\f294"; } +.bi-cloud-arrow-down::before { content: "\f295"; } +.bi-cloud-arrow-up-fill::before { content: "\f296"; } +.bi-cloud-arrow-up::before { content: "\f297"; } +.bi-cloud-check-fill::before { content: "\f298"; } +.bi-cloud-check::before { content: "\f299"; } +.bi-cloud-download-fill::before { content: "\f29a"; } +.bi-cloud-download::before { content: "\f29b"; } +.bi-cloud-drizzle-fill::before { content: "\f29c"; } +.bi-cloud-drizzle::before { content: "\f29d"; } +.bi-cloud-fill::before { content: "\f29e"; } +.bi-cloud-fog-fill::before { content: "\f29f"; } +.bi-cloud-fog::before { content: "\f2a0"; } +.bi-cloud-fog2-fill::before { content: "\f2a1"; } +.bi-cloud-fog2::before { content: "\f2a2"; } +.bi-cloud-hail-fill::before { content: "\f2a3"; } +.bi-cloud-hail::before { content: "\f2a4"; } +.bi-cloud-haze-fill::before { content: "\f2a6"; } +.bi-cloud-haze::before { content: "\f2a7"; } +.bi-cloud-haze2-fill::before { content: "\f2a8"; } +.bi-cloud-lightning-fill::before { content: "\f2a9"; } +.bi-cloud-lightning-rain-fill::before { content: "\f2aa"; } +.bi-cloud-lightning-rain::before { content: "\f2ab"; } +.bi-cloud-lightning::before { content: "\f2ac"; } +.bi-cloud-minus-fill::before { content: "\f2ad"; } +.bi-cloud-minus::before { content: "\f2ae"; } +.bi-cloud-moon-fill::before { content: "\f2af"; } +.bi-cloud-moon::before { content: "\f2b0"; } +.bi-cloud-plus-fill::before { content: "\f2b1"; } +.bi-cloud-plus::before { content: "\f2b2"; } +.bi-cloud-rain-fill::before { content: "\f2b3"; } +.bi-cloud-rain-heavy-fill::before { content: "\f2b4"; } +.bi-cloud-rain-heavy::before { content: "\f2b5"; } +.bi-cloud-rain::before { content: "\f2b6"; } +.bi-cloud-slash-fill::before { content: "\f2b7"; } +.bi-cloud-slash::before { content: "\f2b8"; } +.bi-cloud-sleet-fill::before { content: "\f2b9"; } +.bi-cloud-sleet::before { content: "\f2ba"; } +.bi-cloud-snow-fill::before { content: "\f2bb"; } +.bi-cloud-snow::before { content: "\f2bc"; } +.bi-cloud-sun-fill::before { content: "\f2bd"; } +.bi-cloud-sun::before { content: "\f2be"; } +.bi-cloud-upload-fill::before { content: "\f2bf"; } +.bi-cloud-upload::before { content: "\f2c0"; } +.bi-cloud::before { content: "\f2c1"; } +.bi-clouds-fill::before { content: "\f2c2"; } +.bi-clouds::before { content: "\f2c3"; } +.bi-cloudy-fill::before { content: "\f2c4"; } +.bi-cloudy::before { content: "\f2c5"; } +.bi-code-slash::before { content: "\f2c6"; } +.bi-code-square::before { content: "\f2c7"; } +.bi-code::before { content: "\f2c8"; } +.bi-collection-fill::before { content: "\f2c9"; } +.bi-collection-play-fill::before { content: "\f2ca"; } +.bi-collection-play::before { content: "\f2cb"; } +.bi-collection::before { content: "\f2cc"; } +.bi-columns-gap::before { content: "\f2cd"; } +.bi-columns::before { content: "\f2ce"; } +.bi-command::before { content: "\f2cf"; } +.bi-compass-fill::before { content: "\f2d0"; } +.bi-compass::before { content: "\f2d1"; } +.bi-cone-striped::before { content: "\f2d2"; } +.bi-cone::before { content: "\f2d3"; } +.bi-controller::before { content: "\f2d4"; } +.bi-cpu-fill::before { content: "\f2d5"; } +.bi-cpu::before { content: "\f2d6"; } +.bi-credit-card-2-back-fill::before { content: "\f2d7"; } +.bi-credit-card-2-back::before { content: "\f2d8"; } +.bi-credit-card-2-front-fill::before { content: "\f2d9"; } +.bi-credit-card-2-front::before { content: "\f2da"; } +.bi-credit-card-fill::before { content: "\f2db"; } 
+.bi-credit-card::before { content: "\f2dc"; } +.bi-crop::before { content: "\f2dd"; } +.bi-cup-fill::before { content: "\f2de"; } +.bi-cup-straw::before { content: "\f2df"; } +.bi-cup::before { content: "\f2e0"; } +.bi-cursor-fill::before { content: "\f2e1"; } +.bi-cursor-text::before { content: "\f2e2"; } +.bi-cursor::before { content: "\f2e3"; } +.bi-dash-circle-dotted::before { content: "\f2e4"; } +.bi-dash-circle-fill::before { content: "\f2e5"; } +.bi-dash-circle::before { content: "\f2e6"; } +.bi-dash-square-dotted::before { content: "\f2e7"; } +.bi-dash-square-fill::before { content: "\f2e8"; } +.bi-dash-square::before { content: "\f2e9"; } +.bi-dash::before { content: "\f2ea"; } +.bi-diagram-2-fill::before { content: "\f2eb"; } +.bi-diagram-2::before { content: "\f2ec"; } +.bi-diagram-3-fill::before { content: "\f2ed"; } +.bi-diagram-3::before { content: "\f2ee"; } +.bi-diamond-fill::before { content: "\f2ef"; } +.bi-diamond-half::before { content: "\f2f0"; } +.bi-diamond::before { content: "\f2f1"; } +.bi-dice-1-fill::before { content: "\f2f2"; } +.bi-dice-1::before { content: "\f2f3"; } +.bi-dice-2-fill::before { content: "\f2f4"; } +.bi-dice-2::before { content: "\f2f5"; } +.bi-dice-3-fill::before { content: "\f2f6"; } +.bi-dice-3::before { content: "\f2f7"; } +.bi-dice-4-fill::before { content: "\f2f8"; } +.bi-dice-4::before { content: "\f2f9"; } +.bi-dice-5-fill::before { content: "\f2fa"; } +.bi-dice-5::before { content: "\f2fb"; } +.bi-dice-6-fill::before { content: "\f2fc"; } +.bi-dice-6::before { content: "\f2fd"; } +.bi-disc-fill::before { content: "\f2fe"; } +.bi-disc::before { content: "\f2ff"; } +.bi-discord::before { content: "\f300"; } +.bi-display-fill::before { content: "\f301"; } +.bi-display::before { content: "\f302"; } +.bi-distribute-horizontal::before { content: "\f303"; } +.bi-distribute-vertical::before { content: "\f304"; } +.bi-door-closed-fill::before { content: "\f305"; } +.bi-door-closed::before { content: "\f306"; } +.bi-door-open-fill::before { content: "\f307"; } +.bi-door-open::before { content: "\f308"; } +.bi-dot::before { content: "\f309"; } +.bi-download::before { content: "\f30a"; } +.bi-droplet-fill::before { content: "\f30b"; } +.bi-droplet-half::before { content: "\f30c"; } +.bi-droplet::before { content: "\f30d"; } +.bi-earbuds::before { content: "\f30e"; } +.bi-easel-fill::before { content: "\f30f"; } +.bi-easel::before { content: "\f310"; } +.bi-egg-fill::before { content: "\f311"; } +.bi-egg-fried::before { content: "\f312"; } +.bi-egg::before { content: "\f313"; } +.bi-eject-fill::before { content: "\f314"; } +.bi-eject::before { content: "\f315"; } +.bi-emoji-angry-fill::before { content: "\f316"; } +.bi-emoji-angry::before { content: "\f317"; } +.bi-emoji-dizzy-fill::before { content: "\f318"; } +.bi-emoji-dizzy::before { content: "\f319"; } +.bi-emoji-expressionless-fill::before { content: "\f31a"; } +.bi-emoji-expressionless::before { content: "\f31b"; } +.bi-emoji-frown-fill::before { content: "\f31c"; } +.bi-emoji-frown::before { content: "\f31d"; } +.bi-emoji-heart-eyes-fill::before { content: "\f31e"; } +.bi-emoji-heart-eyes::before { content: "\f31f"; } +.bi-emoji-laughing-fill::before { content: "\f320"; } +.bi-emoji-laughing::before { content: "\f321"; } +.bi-emoji-neutral-fill::before { content: "\f322"; } +.bi-emoji-neutral::before { content: "\f323"; } +.bi-emoji-smile-fill::before { content: "\f324"; } +.bi-emoji-smile-upside-down-fill::before { content: "\f325"; } +.bi-emoji-smile-upside-down::before { content: 
"\f326"; } +.bi-emoji-smile::before { content: "\f327"; } +.bi-emoji-sunglasses-fill::before { content: "\f328"; } +.bi-emoji-sunglasses::before { content: "\f329"; } +.bi-emoji-wink-fill::before { content: "\f32a"; } +.bi-emoji-wink::before { content: "\f32b"; } +.bi-envelope-fill::before { content: "\f32c"; } +.bi-envelope-open-fill::before { content: "\f32d"; } +.bi-envelope-open::before { content: "\f32e"; } +.bi-envelope::before { content: "\f32f"; } +.bi-eraser-fill::before { content: "\f330"; } +.bi-eraser::before { content: "\f331"; } +.bi-exclamation-circle-fill::before { content: "\f332"; } +.bi-exclamation-circle::before { content: "\f333"; } +.bi-exclamation-diamond-fill::before { content: "\f334"; } +.bi-exclamation-diamond::before { content: "\f335"; } +.bi-exclamation-octagon-fill::before { content: "\f336"; } +.bi-exclamation-octagon::before { content: "\f337"; } +.bi-exclamation-square-fill::before { content: "\f338"; } +.bi-exclamation-square::before { content: "\f339"; } +.bi-exclamation-triangle-fill::before { content: "\f33a"; } +.bi-exclamation-triangle::before { content: "\f33b"; } +.bi-exclamation::before { content: "\f33c"; } +.bi-exclude::before { content: "\f33d"; } +.bi-eye-fill::before { content: "\f33e"; } +.bi-eye-slash-fill::before { content: "\f33f"; } +.bi-eye-slash::before { content: "\f340"; } +.bi-eye::before { content: "\f341"; } +.bi-eyedropper::before { content: "\f342"; } +.bi-eyeglasses::before { content: "\f343"; } +.bi-facebook::before { content: "\f344"; } +.bi-file-arrow-down-fill::before { content: "\f345"; } +.bi-file-arrow-down::before { content: "\f346"; } +.bi-file-arrow-up-fill::before { content: "\f347"; } +.bi-file-arrow-up::before { content: "\f348"; } +.bi-file-bar-graph-fill::before { content: "\f349"; } +.bi-file-bar-graph::before { content: "\f34a"; } +.bi-file-binary-fill::before { content: "\f34b"; } +.bi-file-binary::before { content: "\f34c"; } +.bi-file-break-fill::before { content: "\f34d"; } +.bi-file-break::before { content: "\f34e"; } +.bi-file-check-fill::before { content: "\f34f"; } +.bi-file-check::before { content: "\f350"; } +.bi-file-code-fill::before { content: "\f351"; } +.bi-file-code::before { content: "\f352"; } +.bi-file-diff-fill::before { content: "\f353"; } +.bi-file-diff::before { content: "\f354"; } +.bi-file-earmark-arrow-down-fill::before { content: "\f355"; } +.bi-file-earmark-arrow-down::before { content: "\f356"; } +.bi-file-earmark-arrow-up-fill::before { content: "\f357"; } +.bi-file-earmark-arrow-up::before { content: "\f358"; } +.bi-file-earmark-bar-graph-fill::before { content: "\f359"; } +.bi-file-earmark-bar-graph::before { content: "\f35a"; } +.bi-file-earmark-binary-fill::before { content: "\f35b"; } +.bi-file-earmark-binary::before { content: "\f35c"; } +.bi-file-earmark-break-fill::before { content: "\f35d"; } +.bi-file-earmark-break::before { content: "\f35e"; } +.bi-file-earmark-check-fill::before { content: "\f35f"; } +.bi-file-earmark-check::before { content: "\f360"; } +.bi-file-earmark-code-fill::before { content: "\f361"; } +.bi-file-earmark-code::before { content: "\f362"; } +.bi-file-earmark-diff-fill::before { content: "\f363"; } +.bi-file-earmark-diff::before { content: "\f364"; } +.bi-file-earmark-easel-fill::before { content: "\f365"; } +.bi-file-earmark-easel::before { content: "\f366"; } +.bi-file-earmark-excel-fill::before { content: "\f367"; } +.bi-file-earmark-excel::before { content: "\f368"; } +.bi-file-earmark-fill::before { content: "\f369"; } 
+.bi-file-earmark-font-fill::before { content: "\f36a"; } +.bi-file-earmark-font::before { content: "\f36b"; } +.bi-file-earmark-image-fill::before { content: "\f36c"; } +.bi-file-earmark-image::before { content: "\f36d"; } +.bi-file-earmark-lock-fill::before { content: "\f36e"; } +.bi-file-earmark-lock::before { content: "\f36f"; } +.bi-file-earmark-lock2-fill::before { content: "\f370"; } +.bi-file-earmark-lock2::before { content: "\f371"; } +.bi-file-earmark-medical-fill::before { content: "\f372"; } +.bi-file-earmark-medical::before { content: "\f373"; } +.bi-file-earmark-minus-fill::before { content: "\f374"; } +.bi-file-earmark-minus::before { content: "\f375"; } +.bi-file-earmark-music-fill::before { content: "\f376"; } +.bi-file-earmark-music::before { content: "\f377"; } +.bi-file-earmark-person-fill::before { content: "\f378"; } +.bi-file-earmark-person::before { content: "\f379"; } +.bi-file-earmark-play-fill::before { content: "\f37a"; } +.bi-file-earmark-play::before { content: "\f37b"; } +.bi-file-earmark-plus-fill::before { content: "\f37c"; } +.bi-file-earmark-plus::before { content: "\f37d"; } +.bi-file-earmark-post-fill::before { content: "\f37e"; } +.bi-file-earmark-post::before { content: "\f37f"; } +.bi-file-earmark-ppt-fill::before { content: "\f380"; } +.bi-file-earmark-ppt::before { content: "\f381"; } +.bi-file-earmark-richtext-fill::before { content: "\f382"; } +.bi-file-earmark-richtext::before { content: "\f383"; } +.bi-file-earmark-ruled-fill::before { content: "\f384"; } +.bi-file-earmark-ruled::before { content: "\f385"; } +.bi-file-earmark-slides-fill::before { content: "\f386"; } +.bi-file-earmark-slides::before { content: "\f387"; } +.bi-file-earmark-spreadsheet-fill::before { content: "\f388"; } +.bi-file-earmark-spreadsheet::before { content: "\f389"; } +.bi-file-earmark-text-fill::before { content: "\f38a"; } +.bi-file-earmark-text::before { content: "\f38b"; } +.bi-file-earmark-word-fill::before { content: "\f38c"; } +.bi-file-earmark-word::before { content: "\f38d"; } +.bi-file-earmark-x-fill::before { content: "\f38e"; } +.bi-file-earmark-x::before { content: "\f38f"; } +.bi-file-earmark-zip-fill::before { content: "\f390"; } +.bi-file-earmark-zip::before { content: "\f391"; } +.bi-file-earmark::before { content: "\f392"; } +.bi-file-easel-fill::before { content: "\f393"; } +.bi-file-easel::before { content: "\f394"; } +.bi-file-excel-fill::before { content: "\f395"; } +.bi-file-excel::before { content: "\f396"; } +.bi-file-fill::before { content: "\f397"; } +.bi-file-font-fill::before { content: "\f398"; } +.bi-file-font::before { content: "\f399"; } +.bi-file-image-fill::before { content: "\f39a"; } +.bi-file-image::before { content: "\f39b"; } +.bi-file-lock-fill::before { content: "\f39c"; } +.bi-file-lock::before { content: "\f39d"; } +.bi-file-lock2-fill::before { content: "\f39e"; } +.bi-file-lock2::before { content: "\f39f"; } +.bi-file-medical-fill::before { content: "\f3a0"; } +.bi-file-medical::before { content: "\f3a1"; } +.bi-file-minus-fill::before { content: "\f3a2"; } +.bi-file-minus::before { content: "\f3a3"; } +.bi-file-music-fill::before { content: "\f3a4"; } +.bi-file-music::before { content: "\f3a5"; } +.bi-file-person-fill::before { content: "\f3a6"; } +.bi-file-person::before { content: "\f3a7"; } +.bi-file-play-fill::before { content: "\f3a8"; } +.bi-file-play::before { content: "\f3a9"; } +.bi-file-plus-fill::before { content: "\f3aa"; } +.bi-file-plus::before { content: "\f3ab"; } +.bi-file-post-fill::before { content: 
"\f3ac"; } +.bi-file-post::before { content: "\f3ad"; } +.bi-file-ppt-fill::before { content: "\f3ae"; } +.bi-file-ppt::before { content: "\f3af"; } +.bi-file-richtext-fill::before { content: "\f3b0"; } +.bi-file-richtext::before { content: "\f3b1"; } +.bi-file-ruled-fill::before { content: "\f3b2"; } +.bi-file-ruled::before { content: "\f3b3"; } +.bi-file-slides-fill::before { content: "\f3b4"; } +.bi-file-slides::before { content: "\f3b5"; } +.bi-file-spreadsheet-fill::before { content: "\f3b6"; } +.bi-file-spreadsheet::before { content: "\f3b7"; } +.bi-file-text-fill::before { content: "\f3b8"; } +.bi-file-text::before { content: "\f3b9"; } +.bi-file-word-fill::before { content: "\f3ba"; } +.bi-file-word::before { content: "\f3bb"; } +.bi-file-x-fill::before { content: "\f3bc"; } +.bi-file-x::before { content: "\f3bd"; } +.bi-file-zip-fill::before { content: "\f3be"; } +.bi-file-zip::before { content: "\f3bf"; } +.bi-file::before { content: "\f3c0"; } +.bi-files-alt::before { content: "\f3c1"; } +.bi-files::before { content: "\f3c2"; } +.bi-film::before { content: "\f3c3"; } +.bi-filter-circle-fill::before { content: "\f3c4"; } +.bi-filter-circle::before { content: "\f3c5"; } +.bi-filter-left::before { content: "\f3c6"; } +.bi-filter-right::before { content: "\f3c7"; } +.bi-filter-square-fill::before { content: "\f3c8"; } +.bi-filter-square::before { content: "\f3c9"; } +.bi-filter::before { content: "\f3ca"; } +.bi-flag-fill::before { content: "\f3cb"; } +.bi-flag::before { content: "\f3cc"; } +.bi-flower1::before { content: "\f3cd"; } +.bi-flower2::before { content: "\f3ce"; } +.bi-flower3::before { content: "\f3cf"; } +.bi-folder-check::before { content: "\f3d0"; } +.bi-folder-fill::before { content: "\f3d1"; } +.bi-folder-minus::before { content: "\f3d2"; } +.bi-folder-plus::before { content: "\f3d3"; } +.bi-folder-symlink-fill::before { content: "\f3d4"; } +.bi-folder-symlink::before { content: "\f3d5"; } +.bi-folder-x::before { content: "\f3d6"; } +.bi-folder::before { content: "\f3d7"; } +.bi-folder2-open::before { content: "\f3d8"; } +.bi-folder2::before { content: "\f3d9"; } +.bi-fonts::before { content: "\f3da"; } +.bi-forward-fill::before { content: "\f3db"; } +.bi-forward::before { content: "\f3dc"; } +.bi-front::before { content: "\f3dd"; } +.bi-fullscreen-exit::before { content: "\f3de"; } +.bi-fullscreen::before { content: "\f3df"; } +.bi-funnel-fill::before { content: "\f3e0"; } +.bi-funnel::before { content: "\f3e1"; } +.bi-gear-fill::before { content: "\f3e2"; } +.bi-gear-wide-connected::before { content: "\f3e3"; } +.bi-gear-wide::before { content: "\f3e4"; } +.bi-gear::before { content: "\f3e5"; } +.bi-gem::before { content: "\f3e6"; } +.bi-geo-alt-fill::before { content: "\f3e7"; } +.bi-geo-alt::before { content: "\f3e8"; } +.bi-geo-fill::before { content: "\f3e9"; } +.bi-geo::before { content: "\f3ea"; } +.bi-gift-fill::before { content: "\f3eb"; } +.bi-gift::before { content: "\f3ec"; } +.bi-github::before { content: "\f3ed"; } +.bi-globe::before { content: "\f3ee"; } +.bi-globe2::before { content: "\f3ef"; } +.bi-google::before { content: "\f3f0"; } +.bi-graph-down::before { content: "\f3f1"; } +.bi-graph-up::before { content: "\f3f2"; } +.bi-grid-1x2-fill::before { content: "\f3f3"; } +.bi-grid-1x2::before { content: "\f3f4"; } +.bi-grid-3x2-gap-fill::before { content: "\f3f5"; } +.bi-grid-3x2-gap::before { content: "\f3f6"; } +.bi-grid-3x2::before { content: "\f3f7"; } +.bi-grid-3x3-gap-fill::before { content: "\f3f8"; } +.bi-grid-3x3-gap::before { content: 
"\f3f9"; } +.bi-grid-3x3::before { content: "\f3fa"; } +.bi-grid-fill::before { content: "\f3fb"; } +.bi-grid::before { content: "\f3fc"; } +.bi-grip-horizontal::before { content: "\f3fd"; } +.bi-grip-vertical::before { content: "\f3fe"; } +.bi-hammer::before { content: "\f3ff"; } +.bi-hand-index-fill::before { content: "\f400"; } +.bi-hand-index-thumb-fill::before { content: "\f401"; } +.bi-hand-index-thumb::before { content: "\f402"; } +.bi-hand-index::before { content: "\f403"; } +.bi-hand-thumbs-down-fill::before { content: "\f404"; } +.bi-hand-thumbs-down::before { content: "\f405"; } +.bi-hand-thumbs-up-fill::before { content: "\f406"; } +.bi-hand-thumbs-up::before { content: "\f407"; } +.bi-handbag-fill::before { content: "\f408"; } +.bi-handbag::before { content: "\f409"; } +.bi-hash::before { content: "\f40a"; } +.bi-hdd-fill::before { content: "\f40b"; } +.bi-hdd-network-fill::before { content: "\f40c"; } +.bi-hdd-network::before { content: "\f40d"; } +.bi-hdd-rack-fill::before { content: "\f40e"; } +.bi-hdd-rack::before { content: "\f40f"; } +.bi-hdd-stack-fill::before { content: "\f410"; } +.bi-hdd-stack::before { content: "\f411"; } +.bi-hdd::before { content: "\f412"; } +.bi-headphones::before { content: "\f413"; } +.bi-headset::before { content: "\f414"; } +.bi-heart-fill::before { content: "\f415"; } +.bi-heart-half::before { content: "\f416"; } +.bi-heart::before { content: "\f417"; } +.bi-heptagon-fill::before { content: "\f418"; } +.bi-heptagon-half::before { content: "\f419"; } +.bi-heptagon::before { content: "\f41a"; } +.bi-hexagon-fill::before { content: "\f41b"; } +.bi-hexagon-half::before { content: "\f41c"; } +.bi-hexagon::before { content: "\f41d"; } +.bi-hourglass-bottom::before { content: "\f41e"; } +.bi-hourglass-split::before { content: "\f41f"; } +.bi-hourglass-top::before { content: "\f420"; } +.bi-hourglass::before { content: "\f421"; } +.bi-house-door-fill::before { content: "\f422"; } +.bi-house-door::before { content: "\f423"; } +.bi-house-fill::before { content: "\f424"; } +.bi-house::before { content: "\f425"; } +.bi-hr::before { content: "\f426"; } +.bi-hurricane::before { content: "\f427"; } +.bi-image-alt::before { content: "\f428"; } +.bi-image-fill::before { content: "\f429"; } +.bi-image::before { content: "\f42a"; } +.bi-images::before { content: "\f42b"; } +.bi-inbox-fill::before { content: "\f42c"; } +.bi-inbox::before { content: "\f42d"; } +.bi-inboxes-fill::before { content: "\f42e"; } +.bi-inboxes::before { content: "\f42f"; } +.bi-info-circle-fill::before { content: "\f430"; } +.bi-info-circle::before { content: "\f431"; } +.bi-info-square-fill::before { content: "\f432"; } +.bi-info-square::before { content: "\f433"; } +.bi-info::before { content: "\f434"; } +.bi-input-cursor-text::before { content: "\f435"; } +.bi-input-cursor::before { content: "\f436"; } +.bi-instagram::before { content: "\f437"; } +.bi-intersect::before { content: "\f438"; } +.bi-journal-album::before { content: "\f439"; } +.bi-journal-arrow-down::before { content: "\f43a"; } +.bi-journal-arrow-up::before { content: "\f43b"; } +.bi-journal-bookmark-fill::before { content: "\f43c"; } +.bi-journal-bookmark::before { content: "\f43d"; } +.bi-journal-check::before { content: "\f43e"; } +.bi-journal-code::before { content: "\f43f"; } +.bi-journal-medical::before { content: "\f440"; } +.bi-journal-minus::before { content: "\f441"; } +.bi-journal-plus::before { content: "\f442"; } +.bi-journal-richtext::before { content: "\f443"; } +.bi-journal-text::before { content: 
"\f444"; } +.bi-journal-x::before { content: "\f445"; } +.bi-journal::before { content: "\f446"; } +.bi-journals::before { content: "\f447"; } +.bi-joystick::before { content: "\f448"; } +.bi-justify-left::before { content: "\f449"; } +.bi-justify-right::before { content: "\f44a"; } +.bi-justify::before { content: "\f44b"; } +.bi-kanban-fill::before { content: "\f44c"; } +.bi-kanban::before { content: "\f44d"; } +.bi-key-fill::before { content: "\f44e"; } +.bi-key::before { content: "\f44f"; } +.bi-keyboard-fill::before { content: "\f450"; } +.bi-keyboard::before { content: "\f451"; } +.bi-ladder::before { content: "\f452"; } +.bi-lamp-fill::before { content: "\f453"; } +.bi-lamp::before { content: "\f454"; } +.bi-laptop-fill::before { content: "\f455"; } +.bi-laptop::before { content: "\f456"; } +.bi-layer-backward::before { content: "\f457"; } +.bi-layer-forward::before { content: "\f458"; } +.bi-layers-fill::before { content: "\f459"; } +.bi-layers-half::before { content: "\f45a"; } +.bi-layers::before { content: "\f45b"; } +.bi-layout-sidebar-inset-reverse::before { content: "\f45c"; } +.bi-layout-sidebar-inset::before { content: "\f45d"; } +.bi-layout-sidebar-reverse::before { content: "\f45e"; } +.bi-layout-sidebar::before { content: "\f45f"; } +.bi-layout-split::before { content: "\f460"; } +.bi-layout-text-sidebar-reverse::before { content: "\f461"; } +.bi-layout-text-sidebar::before { content: "\f462"; } +.bi-layout-text-window-reverse::before { content: "\f463"; } +.bi-layout-text-window::before { content: "\f464"; } +.bi-layout-three-columns::before { content: "\f465"; } +.bi-layout-wtf::before { content: "\f466"; } +.bi-life-preserver::before { content: "\f467"; } +.bi-lightbulb-fill::before { content: "\f468"; } +.bi-lightbulb-off-fill::before { content: "\f469"; } +.bi-lightbulb-off::before { content: "\f46a"; } +.bi-lightbulb::before { content: "\f46b"; } +.bi-lightning-charge-fill::before { content: "\f46c"; } +.bi-lightning-charge::before { content: "\f46d"; } +.bi-lightning-fill::before { content: "\f46e"; } +.bi-lightning::before { content: "\f46f"; } +.bi-link-45deg::before { content: "\f470"; } +.bi-link::before { content: "\f471"; } +.bi-linkedin::before { content: "\f472"; } +.bi-list-check::before { content: "\f473"; } +.bi-list-nested::before { content: "\f474"; } +.bi-list-ol::before { content: "\f475"; } +.bi-list-stars::before { content: "\f476"; } +.bi-list-task::before { content: "\f477"; } +.bi-list-ul::before { content: "\f478"; } +.bi-list::before { content: "\f479"; } +.bi-lock-fill::before { content: "\f47a"; } +.bi-lock::before { content: "\f47b"; } +.bi-mailbox::before { content: "\f47c"; } +.bi-mailbox2::before { content: "\f47d"; } +.bi-map-fill::before { content: "\f47e"; } +.bi-map::before { content: "\f47f"; } +.bi-markdown-fill::before { content: "\f480"; } +.bi-markdown::before { content: "\f481"; } +.bi-mask::before { content: "\f482"; } +.bi-megaphone-fill::before { content: "\f483"; } +.bi-megaphone::before { content: "\f484"; } +.bi-menu-app-fill::before { content: "\f485"; } +.bi-menu-app::before { content: "\f486"; } +.bi-menu-button-fill::before { content: "\f487"; } +.bi-menu-button-wide-fill::before { content: "\f488"; } +.bi-menu-button-wide::before { content: "\f489"; } +.bi-menu-button::before { content: "\f48a"; } +.bi-menu-down::before { content: "\f48b"; } +.bi-menu-up::before { content: "\f48c"; } +.bi-mic-fill::before { content: "\f48d"; } +.bi-mic-mute-fill::before { content: "\f48e"; } +.bi-mic-mute::before { content: "\f48f"; 
} +.bi-mic::before { content: "\f490"; } +.bi-minecart-loaded::before { content: "\f491"; } +.bi-minecart::before { content: "\f492"; } +.bi-moisture::before { content: "\f493"; } +.bi-moon-fill::before { content: "\f494"; } +.bi-moon-stars-fill::before { content: "\f495"; } +.bi-moon-stars::before { content: "\f496"; } +.bi-moon::before { content: "\f497"; } +.bi-mouse-fill::before { content: "\f498"; } +.bi-mouse::before { content: "\f499"; } +.bi-mouse2-fill::before { content: "\f49a"; } +.bi-mouse2::before { content: "\f49b"; } +.bi-mouse3-fill::before { content: "\f49c"; } +.bi-mouse3::before { content: "\f49d"; } +.bi-music-note-beamed::before { content: "\f49e"; } +.bi-music-note-list::before { content: "\f49f"; } +.bi-music-note::before { content: "\f4a0"; } +.bi-music-player-fill::before { content: "\f4a1"; } +.bi-music-player::before { content: "\f4a2"; } +.bi-newspaper::before { content: "\f4a3"; } +.bi-node-minus-fill::before { content: "\f4a4"; } +.bi-node-minus::before { content: "\f4a5"; } +.bi-node-plus-fill::before { content: "\f4a6"; } +.bi-node-plus::before { content: "\f4a7"; } +.bi-nut-fill::before { content: "\f4a8"; } +.bi-nut::before { content: "\f4a9"; } +.bi-octagon-fill::before { content: "\f4aa"; } +.bi-octagon-half::before { content: "\f4ab"; } +.bi-octagon::before { content: "\f4ac"; } +.bi-option::before { content: "\f4ad"; } +.bi-outlet::before { content: "\f4ae"; } +.bi-paint-bucket::before { content: "\f4af"; } +.bi-palette-fill::before { content: "\f4b0"; } +.bi-palette::before { content: "\f4b1"; } +.bi-palette2::before { content: "\f4b2"; } +.bi-paperclip::before { content: "\f4b3"; } +.bi-paragraph::before { content: "\f4b4"; } +.bi-patch-check-fill::before { content: "\f4b5"; } +.bi-patch-check::before { content: "\f4b6"; } +.bi-patch-exclamation-fill::before { content: "\f4b7"; } +.bi-patch-exclamation::before { content: "\f4b8"; } +.bi-patch-minus-fill::before { content: "\f4b9"; } +.bi-patch-minus::before { content: "\f4ba"; } +.bi-patch-plus-fill::before { content: "\f4bb"; } +.bi-patch-plus::before { content: "\f4bc"; } +.bi-patch-question-fill::before { content: "\f4bd"; } +.bi-patch-question::before { content: "\f4be"; } +.bi-pause-btn-fill::before { content: "\f4bf"; } +.bi-pause-btn::before { content: "\f4c0"; } +.bi-pause-circle-fill::before { content: "\f4c1"; } +.bi-pause-circle::before { content: "\f4c2"; } +.bi-pause-fill::before { content: "\f4c3"; } +.bi-pause::before { content: "\f4c4"; } +.bi-peace-fill::before { content: "\f4c5"; } +.bi-peace::before { content: "\f4c6"; } +.bi-pen-fill::before { content: "\f4c7"; } +.bi-pen::before { content: "\f4c8"; } +.bi-pencil-fill::before { content: "\f4c9"; } +.bi-pencil-square::before { content: "\f4ca"; } +.bi-pencil::before { content: "\f4cb"; } +.bi-pentagon-fill::before { content: "\f4cc"; } +.bi-pentagon-half::before { content: "\f4cd"; } +.bi-pentagon::before { content: "\f4ce"; } +.bi-people-fill::before { content: "\f4cf"; } +.bi-people::before { content: "\f4d0"; } +.bi-percent::before { content: "\f4d1"; } +.bi-person-badge-fill::before { content: "\f4d2"; } +.bi-person-badge::before { content: "\f4d3"; } +.bi-person-bounding-box::before { content: "\f4d4"; } +.bi-person-check-fill::before { content: "\f4d5"; } +.bi-person-check::before { content: "\f4d6"; } +.bi-person-circle::before { content: "\f4d7"; } +.bi-person-dash-fill::before { content: "\f4d8"; } +.bi-person-dash::before { content: "\f4d9"; } +.bi-person-fill::before { content: "\f4da"; } +.bi-person-lines-fill::before { 
content: "\f4db"; } +.bi-person-plus-fill::before { content: "\f4dc"; } +.bi-person-plus::before { content: "\f4dd"; } +.bi-person-square::before { content: "\f4de"; } +.bi-person-x-fill::before { content: "\f4df"; } +.bi-person-x::before { content: "\f4e0"; } +.bi-person::before { content: "\f4e1"; } +.bi-phone-fill::before { content: "\f4e2"; } +.bi-phone-landscape-fill::before { content: "\f4e3"; } +.bi-phone-landscape::before { content: "\f4e4"; } +.bi-phone-vibrate-fill::before { content: "\f4e5"; } +.bi-phone-vibrate::before { content: "\f4e6"; } +.bi-phone::before { content: "\f4e7"; } +.bi-pie-chart-fill::before { content: "\f4e8"; } +.bi-pie-chart::before { content: "\f4e9"; } +.bi-pin-angle-fill::before { content: "\f4ea"; } +.bi-pin-angle::before { content: "\f4eb"; } +.bi-pin-fill::before { content: "\f4ec"; } +.bi-pin::before { content: "\f4ed"; } +.bi-pip-fill::before { content: "\f4ee"; } +.bi-pip::before { content: "\f4ef"; } +.bi-play-btn-fill::before { content: "\f4f0"; } +.bi-play-btn::before { content: "\f4f1"; } +.bi-play-circle-fill::before { content: "\f4f2"; } +.bi-play-circle::before { content: "\f4f3"; } +.bi-play-fill::before { content: "\f4f4"; } +.bi-play::before { content: "\f4f5"; } +.bi-plug-fill::before { content: "\f4f6"; } +.bi-plug::before { content: "\f4f7"; } +.bi-plus-circle-dotted::before { content: "\f4f8"; } +.bi-plus-circle-fill::before { content: "\f4f9"; } +.bi-plus-circle::before { content: "\f4fa"; } +.bi-plus-square-dotted::before { content: "\f4fb"; } +.bi-plus-square-fill::before { content: "\f4fc"; } +.bi-plus-square::before { content: "\f4fd"; } +.bi-plus::before { content: "\f4fe"; } +.bi-power::before { content: "\f4ff"; } +.bi-printer-fill::before { content: "\f500"; } +.bi-printer::before { content: "\f501"; } +.bi-puzzle-fill::before { content: "\f502"; } +.bi-puzzle::before { content: "\f503"; } +.bi-question-circle-fill::before { content: "\f504"; } +.bi-question-circle::before { content: "\f505"; } +.bi-question-diamond-fill::before { content: "\f506"; } +.bi-question-diamond::before { content: "\f507"; } +.bi-question-octagon-fill::before { content: "\f508"; } +.bi-question-octagon::before { content: "\f509"; } +.bi-question-square-fill::before { content: "\f50a"; } +.bi-question-square::before { content: "\f50b"; } +.bi-question::before { content: "\f50c"; } +.bi-rainbow::before { content: "\f50d"; } +.bi-receipt-cutoff::before { content: "\f50e"; } +.bi-receipt::before { content: "\f50f"; } +.bi-reception-0::before { content: "\f510"; } +.bi-reception-1::before { content: "\f511"; } +.bi-reception-2::before { content: "\f512"; } +.bi-reception-3::before { content: "\f513"; } +.bi-reception-4::before { content: "\f514"; } +.bi-record-btn-fill::before { content: "\f515"; } +.bi-record-btn::before { content: "\f516"; } +.bi-record-circle-fill::before { content: "\f517"; } +.bi-record-circle::before { content: "\f518"; } +.bi-record-fill::before { content: "\f519"; } +.bi-record::before { content: "\f51a"; } +.bi-record2-fill::before { content: "\f51b"; } +.bi-record2::before { content: "\f51c"; } +.bi-reply-all-fill::before { content: "\f51d"; } +.bi-reply-all::before { content: "\f51e"; } +.bi-reply-fill::before { content: "\f51f"; } +.bi-reply::before { content: "\f520"; } +.bi-rss-fill::before { content: "\f521"; } +.bi-rss::before { content: "\f522"; } +.bi-rulers::before { content: "\f523"; } +.bi-save-fill::before { content: "\f524"; } +.bi-save::before { content: "\f525"; } +.bi-save2-fill::before { content: "\f526"; } 
+.bi-save2::before { content: "\f527"; } +.bi-scissors::before { content: "\f528"; } +.bi-screwdriver::before { content: "\f529"; } +.bi-search::before { content: "\f52a"; } +.bi-segmented-nav::before { content: "\f52b"; } +.bi-server::before { content: "\f52c"; } +.bi-share-fill::before { content: "\f52d"; } +.bi-share::before { content: "\f52e"; } +.bi-shield-check::before { content: "\f52f"; } +.bi-shield-exclamation::before { content: "\f530"; } +.bi-shield-fill-check::before { content: "\f531"; } +.bi-shield-fill-exclamation::before { content: "\f532"; } +.bi-shield-fill-minus::before { content: "\f533"; } +.bi-shield-fill-plus::before { content: "\f534"; } +.bi-shield-fill-x::before { content: "\f535"; } +.bi-shield-fill::before { content: "\f536"; } +.bi-shield-lock-fill::before { content: "\f537"; } +.bi-shield-lock::before { content: "\f538"; } +.bi-shield-minus::before { content: "\f539"; } +.bi-shield-plus::before { content: "\f53a"; } +.bi-shield-shaded::before { content: "\f53b"; } +.bi-shield-slash-fill::before { content: "\f53c"; } +.bi-shield-slash::before { content: "\f53d"; } +.bi-shield-x::before { content: "\f53e"; } +.bi-shield::before { content: "\f53f"; } +.bi-shift-fill::before { content: "\f540"; } +.bi-shift::before { content: "\f541"; } +.bi-shop-window::before { content: "\f542"; } +.bi-shop::before { content: "\f543"; } +.bi-shuffle::before { content: "\f544"; } +.bi-signpost-2-fill::before { content: "\f545"; } +.bi-signpost-2::before { content: "\f546"; } +.bi-signpost-fill::before { content: "\f547"; } +.bi-signpost-split-fill::before { content: "\f548"; } +.bi-signpost-split::before { content: "\f549"; } +.bi-signpost::before { content: "\f54a"; } +.bi-sim-fill::before { content: "\f54b"; } +.bi-sim::before { content: "\f54c"; } +.bi-skip-backward-btn-fill::before { content: "\f54d"; } +.bi-skip-backward-btn::before { content: "\f54e"; } +.bi-skip-backward-circle-fill::before { content: "\f54f"; } +.bi-skip-backward-circle::before { content: "\f550"; } +.bi-skip-backward-fill::before { content: "\f551"; } +.bi-skip-backward::before { content: "\f552"; } +.bi-skip-end-btn-fill::before { content: "\f553"; } +.bi-skip-end-btn::before { content: "\f554"; } +.bi-skip-end-circle-fill::before { content: "\f555"; } +.bi-skip-end-circle::before { content: "\f556"; } +.bi-skip-end-fill::before { content: "\f557"; } +.bi-skip-end::before { content: "\f558"; } +.bi-skip-forward-btn-fill::before { content: "\f559"; } +.bi-skip-forward-btn::before { content: "\f55a"; } +.bi-skip-forward-circle-fill::before { content: "\f55b"; } +.bi-skip-forward-circle::before { content: "\f55c"; } +.bi-skip-forward-fill::before { content: "\f55d"; } +.bi-skip-forward::before { content: "\f55e"; } +.bi-skip-start-btn-fill::before { content: "\f55f"; } +.bi-skip-start-btn::before { content: "\f560"; } +.bi-skip-start-circle-fill::before { content: "\f561"; } +.bi-skip-start-circle::before { content: "\f562"; } +.bi-skip-start-fill::before { content: "\f563"; } +.bi-skip-start::before { content: "\f564"; } +.bi-slack::before { content: "\f565"; } +.bi-slash-circle-fill::before { content: "\f566"; } +.bi-slash-circle::before { content: "\f567"; } +.bi-slash-square-fill::before { content: "\f568"; } +.bi-slash-square::before { content: "\f569"; } +.bi-slash::before { content: "\f56a"; } +.bi-sliders::before { content: "\f56b"; } +.bi-smartwatch::before { content: "\f56c"; } +.bi-snow::before { content: "\f56d"; } +.bi-snow2::before { content: "\f56e"; } +.bi-snow3::before { content: "\f56f"; 
} +.bi-sort-alpha-down-alt::before { content: "\f570"; } +.bi-sort-alpha-down::before { content: "\f571"; } +.bi-sort-alpha-up-alt::before { content: "\f572"; } +.bi-sort-alpha-up::before { content: "\f573"; } +.bi-sort-down-alt::before { content: "\f574"; } +.bi-sort-down::before { content: "\f575"; } +.bi-sort-numeric-down-alt::before { content: "\f576"; } +.bi-sort-numeric-down::before { content: "\f577"; } +.bi-sort-numeric-up-alt::before { content: "\f578"; } +.bi-sort-numeric-up::before { content: "\f579"; } +.bi-sort-up-alt::before { content: "\f57a"; } +.bi-sort-up::before { content: "\f57b"; } +.bi-soundwave::before { content: "\f57c"; } +.bi-speaker-fill::before { content: "\f57d"; } +.bi-speaker::before { content: "\f57e"; } +.bi-speedometer::before { content: "\f57f"; } +.bi-speedometer2::before { content: "\f580"; } +.bi-spellcheck::before { content: "\f581"; } +.bi-square-fill::before { content: "\f582"; } +.bi-square-half::before { content: "\f583"; } +.bi-square::before { content: "\f584"; } +.bi-stack::before { content: "\f585"; } +.bi-star-fill::before { content: "\f586"; } +.bi-star-half::before { content: "\f587"; } +.bi-star::before { content: "\f588"; } +.bi-stars::before { content: "\f589"; } +.bi-stickies-fill::before { content: "\f58a"; } +.bi-stickies::before { content: "\f58b"; } +.bi-sticky-fill::before { content: "\f58c"; } +.bi-sticky::before { content: "\f58d"; } +.bi-stop-btn-fill::before { content: "\f58e"; } +.bi-stop-btn::before { content: "\f58f"; } +.bi-stop-circle-fill::before { content: "\f590"; } +.bi-stop-circle::before { content: "\f591"; } +.bi-stop-fill::before { content: "\f592"; } +.bi-stop::before { content: "\f593"; } +.bi-stoplights-fill::before { content: "\f594"; } +.bi-stoplights::before { content: "\f595"; } +.bi-stopwatch-fill::before { content: "\f596"; } +.bi-stopwatch::before { content: "\f597"; } +.bi-subtract::before { content: "\f598"; } +.bi-suit-club-fill::before { content: "\f599"; } +.bi-suit-club::before { content: "\f59a"; } +.bi-suit-diamond-fill::before { content: "\f59b"; } +.bi-suit-diamond::before { content: "\f59c"; } +.bi-suit-heart-fill::before { content: "\f59d"; } +.bi-suit-heart::before { content: "\f59e"; } +.bi-suit-spade-fill::before { content: "\f59f"; } +.bi-suit-spade::before { content: "\f5a0"; } +.bi-sun-fill::before { content: "\f5a1"; } +.bi-sun::before { content: "\f5a2"; } +.bi-sunglasses::before { content: "\f5a3"; } +.bi-sunrise-fill::before { content: "\f5a4"; } +.bi-sunrise::before { content: "\f5a5"; } +.bi-sunset-fill::before { content: "\f5a6"; } +.bi-sunset::before { content: "\f5a7"; } +.bi-symmetry-horizontal::before { content: "\f5a8"; } +.bi-symmetry-vertical::before { content: "\f5a9"; } +.bi-table::before { content: "\f5aa"; } +.bi-tablet-fill::before { content: "\f5ab"; } +.bi-tablet-landscape-fill::before { content: "\f5ac"; } +.bi-tablet-landscape::before { content: "\f5ad"; } +.bi-tablet::before { content: "\f5ae"; } +.bi-tag-fill::before { content: "\f5af"; } +.bi-tag::before { content: "\f5b0"; } +.bi-tags-fill::before { content: "\f5b1"; } +.bi-tags::before { content: "\f5b2"; } +.bi-telegram::before { content: "\f5b3"; } +.bi-telephone-fill::before { content: "\f5b4"; } +.bi-telephone-forward-fill::before { content: "\f5b5"; } +.bi-telephone-forward::before { content: "\f5b6"; } +.bi-telephone-inbound-fill::before { content: "\f5b7"; } +.bi-telephone-inbound::before { content: "\f5b8"; } +.bi-telephone-minus-fill::before { content: "\f5b9"; } +.bi-telephone-minus::before { 
content: "\f5ba"; } +.bi-telephone-outbound-fill::before { content: "\f5bb"; } +.bi-telephone-outbound::before { content: "\f5bc"; } +.bi-telephone-plus-fill::before { content: "\f5bd"; } +.bi-telephone-plus::before { content: "\f5be"; } +.bi-telephone-x-fill::before { content: "\f5bf"; } +.bi-telephone-x::before { content: "\f5c0"; } +.bi-telephone::before { content: "\f5c1"; } +.bi-terminal-fill::before { content: "\f5c2"; } +.bi-terminal::before { content: "\f5c3"; } +.bi-text-center::before { content: "\f5c4"; } +.bi-text-indent-left::before { content: "\f5c5"; } +.bi-text-indent-right::before { content: "\f5c6"; } +.bi-text-left::before { content: "\f5c7"; } +.bi-text-paragraph::before { content: "\f5c8"; } +.bi-text-right::before { content: "\f5c9"; } +.bi-textarea-resize::before { content: "\f5ca"; } +.bi-textarea-t::before { content: "\f5cb"; } +.bi-textarea::before { content: "\f5cc"; } +.bi-thermometer-half::before { content: "\f5cd"; } +.bi-thermometer-high::before { content: "\f5ce"; } +.bi-thermometer-low::before { content: "\f5cf"; } +.bi-thermometer-snow::before { content: "\f5d0"; } +.bi-thermometer-sun::before { content: "\f5d1"; } +.bi-thermometer::before { content: "\f5d2"; } +.bi-three-dots-vertical::before { content: "\f5d3"; } +.bi-three-dots::before { content: "\f5d4"; } +.bi-toggle-off::before { content: "\f5d5"; } +.bi-toggle-on::before { content: "\f5d6"; } +.bi-toggle2-off::before { content: "\f5d7"; } +.bi-toggle2-on::before { content: "\f5d8"; } +.bi-toggles::before { content: "\f5d9"; } +.bi-toggles2::before { content: "\f5da"; } +.bi-tools::before { content: "\f5db"; } +.bi-tornado::before { content: "\f5dc"; } +.bi-trash-fill::before { content: "\f5dd"; } +.bi-trash::before { content: "\f5de"; } +.bi-trash2-fill::before { content: "\f5df"; } +.bi-trash2::before { content: "\f5e0"; } +.bi-tree-fill::before { content: "\f5e1"; } +.bi-tree::before { content: "\f5e2"; } +.bi-triangle-fill::before { content: "\f5e3"; } +.bi-triangle-half::before { content: "\f5e4"; } +.bi-triangle::before { content: "\f5e5"; } +.bi-trophy-fill::before { content: "\f5e6"; } +.bi-trophy::before { content: "\f5e7"; } +.bi-tropical-storm::before { content: "\f5e8"; } +.bi-truck-flatbed::before { content: "\f5e9"; } +.bi-truck::before { content: "\f5ea"; } +.bi-tsunami::before { content: "\f5eb"; } +.bi-tv-fill::before { content: "\f5ec"; } +.bi-tv::before { content: "\f5ed"; } +.bi-twitch::before { content: "\f5ee"; } +.bi-twitter::before { content: "\f5ef"; } +.bi-type-bold::before { content: "\f5f0"; } +.bi-type-h1::before { content: "\f5f1"; } +.bi-type-h2::before { content: "\f5f2"; } +.bi-type-h3::before { content: "\f5f3"; } +.bi-type-italic::before { content: "\f5f4"; } +.bi-type-strikethrough::before { content: "\f5f5"; } +.bi-type-underline::before { content: "\f5f6"; } +.bi-type::before { content: "\f5f7"; } +.bi-ui-checks-grid::before { content: "\f5f8"; } +.bi-ui-checks::before { content: "\f5f9"; } +.bi-ui-radios-grid::before { content: "\f5fa"; } +.bi-ui-radios::before { content: "\f5fb"; } +.bi-umbrella-fill::before { content: "\f5fc"; } +.bi-umbrella::before { content: "\f5fd"; } +.bi-union::before { content: "\f5fe"; } +.bi-unlock-fill::before { content: "\f5ff"; } +.bi-unlock::before { content: "\f600"; } +.bi-upc-scan::before { content: "\f601"; } +.bi-upc::before { content: "\f602"; } +.bi-upload::before { content: "\f603"; } +.bi-vector-pen::before { content: "\f604"; } +.bi-view-list::before { content: "\f605"; } +.bi-view-stacked::before { content: "\f606"; } 
+.bi-vinyl-fill::before { content: "\f607"; } +.bi-vinyl::before { content: "\f608"; } +.bi-voicemail::before { content: "\f609"; } +.bi-volume-down-fill::before { content: "\f60a"; } +.bi-volume-down::before { content: "\f60b"; } +.bi-volume-mute-fill::before { content: "\f60c"; } +.bi-volume-mute::before { content: "\f60d"; } +.bi-volume-off-fill::before { content: "\f60e"; } +.bi-volume-off::before { content: "\f60f"; } +.bi-volume-up-fill::before { content: "\f610"; } +.bi-volume-up::before { content: "\f611"; } +.bi-vr::before { content: "\f612"; } +.bi-wallet-fill::before { content: "\f613"; } +.bi-wallet::before { content: "\f614"; } +.bi-wallet2::before { content: "\f615"; } +.bi-watch::before { content: "\f616"; } +.bi-water::before { content: "\f617"; } +.bi-whatsapp::before { content: "\f618"; } +.bi-wifi-1::before { content: "\f619"; } +.bi-wifi-2::before { content: "\f61a"; } +.bi-wifi-off::before { content: "\f61b"; } +.bi-wifi::before { content: "\f61c"; } +.bi-wind::before { content: "\f61d"; } +.bi-window-dock::before { content: "\f61e"; } +.bi-window-sidebar::before { content: "\f61f"; } +.bi-window::before { content: "\f620"; } +.bi-wrench::before { content: "\f621"; } +.bi-x-circle-fill::before { content: "\f622"; } +.bi-x-circle::before { content: "\f623"; } +.bi-x-diamond-fill::before { content: "\f624"; } +.bi-x-diamond::before { content: "\f625"; } +.bi-x-octagon-fill::before { content: "\f626"; } +.bi-x-octagon::before { content: "\f627"; } +.bi-x-square-fill::before { content: "\f628"; } +.bi-x-square::before { content: "\f629"; } +.bi-x::before { content: "\f62a"; } +.bi-youtube::before { content: "\f62b"; } +.bi-zoom-in::before { content: "\f62c"; } +.bi-zoom-out::before { content: "\f62d"; } +.bi-bank::before { content: "\f62e"; } +.bi-bank2::before { content: "\f62f"; } +.bi-bell-slash-fill::before { content: "\f630"; } +.bi-bell-slash::before { content: "\f631"; } +.bi-cash-coin::before { content: "\f632"; } +.bi-check-lg::before { content: "\f633"; } +.bi-coin::before { content: "\f634"; } +.bi-currency-bitcoin::before { content: "\f635"; } +.bi-currency-dollar::before { content: "\f636"; } +.bi-currency-euro::before { content: "\f637"; } +.bi-currency-exchange::before { content: "\f638"; } +.bi-currency-pound::before { content: "\f639"; } +.bi-currency-yen::before { content: "\f63a"; } +.bi-dash-lg::before { content: "\f63b"; } +.bi-exclamation-lg::before { content: "\f63c"; } +.bi-file-earmark-pdf-fill::before { content: "\f63d"; } +.bi-file-earmark-pdf::before { content: "\f63e"; } +.bi-file-pdf-fill::before { content: "\f63f"; } +.bi-file-pdf::before { content: "\f640"; } +.bi-gender-ambiguous::before { content: "\f641"; } +.bi-gender-female::before { content: "\f642"; } +.bi-gender-male::before { content: "\f643"; } +.bi-gender-trans::before { content: "\f644"; } +.bi-headset-vr::before { content: "\f645"; } +.bi-info-lg::before { content: "\f646"; } +.bi-mastodon::before { content: "\f647"; } +.bi-messenger::before { content: "\f648"; } +.bi-piggy-bank-fill::before { content: "\f649"; } +.bi-piggy-bank::before { content: "\f64a"; } +.bi-pin-map-fill::before { content: "\f64b"; } +.bi-pin-map::before { content: "\f64c"; } +.bi-plus-lg::before { content: "\f64d"; } +.bi-question-lg::before { content: "\f64e"; } +.bi-recycle::before { content: "\f64f"; } +.bi-reddit::before { content: "\f650"; } +.bi-safe-fill::before { content: "\f651"; } +.bi-safe2-fill::before { content: "\f652"; } +.bi-safe2::before { content: "\f653"; } +.bi-sd-card-fill::before { 
content: "\f654"; } +.bi-sd-card::before { content: "\f655"; } +.bi-skype::before { content: "\f656"; } +.bi-slash-lg::before { content: "\f657"; } +.bi-translate::before { content: "\f658"; } +.bi-x-lg::before { content: "\f659"; } +.bi-safe::before { content: "\f65a"; } +.bi-apple::before { content: "\f65b"; } +.bi-microsoft::before { content: "\f65d"; } +.bi-windows::before { content: "\f65e"; } +.bi-behance::before { content: "\f65c"; } +.bi-dribbble::before { content: "\f65f"; } +.bi-line::before { content: "\f660"; } +.bi-medium::before { content: "\f661"; } +.bi-paypal::before { content: "\f662"; } +.bi-pinterest::before { content: "\f663"; } +.bi-signal::before { content: "\f664"; } +.bi-snapchat::before { content: "\f665"; } +.bi-spotify::before { content: "\f666"; } +.bi-stack-overflow::before { content: "\f667"; } +.bi-strava::before { content: "\f668"; } +.bi-wordpress::before { content: "\f669"; } +.bi-vimeo::before { content: "\f66a"; } +.bi-activity::before { content: "\f66b"; } +.bi-easel2-fill::before { content: "\f66c"; } +.bi-easel2::before { content: "\f66d"; } +.bi-easel3-fill::before { content: "\f66e"; } +.bi-easel3::before { content: "\f66f"; } +.bi-fan::before { content: "\f670"; } +.bi-fingerprint::before { content: "\f671"; } +.bi-graph-down-arrow::before { content: "\f672"; } +.bi-graph-up-arrow::before { content: "\f673"; } +.bi-hypnotize::before { content: "\f674"; } +.bi-magic::before { content: "\f675"; } +.bi-person-rolodex::before { content: "\f676"; } +.bi-person-video::before { content: "\f677"; } +.bi-person-video2::before { content: "\f678"; } +.bi-person-video3::before { content: "\f679"; } +.bi-person-workspace::before { content: "\f67a"; } +.bi-radioactive::before { content: "\f67b"; } +.bi-webcam-fill::before { content: "\f67c"; } +.bi-webcam::before { content: "\f67d"; } +.bi-yin-yang::before { content: "\f67e"; } +.bi-bandaid-fill::before { content: "\f680"; } +.bi-bandaid::before { content: "\f681"; } +.bi-bluetooth::before { content: "\f682"; } +.bi-body-text::before { content: "\f683"; } +.bi-boombox::before { content: "\f684"; } +.bi-boxes::before { content: "\f685"; } +.bi-dpad-fill::before { content: "\f686"; } +.bi-dpad::before { content: "\f687"; } +.bi-ear-fill::before { content: "\f688"; } +.bi-ear::before { content: "\f689"; } +.bi-envelope-check-fill::before { content: "\f68b"; } +.bi-envelope-check::before { content: "\f68c"; } +.bi-envelope-dash-fill::before { content: "\f68e"; } +.bi-envelope-dash::before { content: "\f68f"; } +.bi-envelope-exclamation-fill::before { content: "\f691"; } +.bi-envelope-exclamation::before { content: "\f692"; } +.bi-envelope-plus-fill::before { content: "\f693"; } +.bi-envelope-plus::before { content: "\f694"; } +.bi-envelope-slash-fill::before { content: "\f696"; } +.bi-envelope-slash::before { content: "\f697"; } +.bi-envelope-x-fill::before { content: "\f699"; } +.bi-envelope-x::before { content: "\f69a"; } +.bi-explicit-fill::before { content: "\f69b"; } +.bi-explicit::before { content: "\f69c"; } +.bi-git::before { content: "\f69d"; } +.bi-infinity::before { content: "\f69e"; } +.bi-list-columns-reverse::before { content: "\f69f"; } +.bi-list-columns::before { content: "\f6a0"; } +.bi-meta::before { content: "\f6a1"; } +.bi-nintendo-switch::before { content: "\f6a4"; } +.bi-pc-display-horizontal::before { content: "\f6a5"; } +.bi-pc-display::before { content: "\f6a6"; } +.bi-pc-horizontal::before { content: "\f6a7"; } +.bi-pc::before { content: "\f6a8"; } +.bi-playstation::before { content: 
"\f6a9"; } +.bi-plus-slash-minus::before { content: "\f6aa"; } +.bi-projector-fill::before { content: "\f6ab"; } +.bi-projector::before { content: "\f6ac"; } +.bi-qr-code-scan::before { content: "\f6ad"; } +.bi-qr-code::before { content: "\f6ae"; } +.bi-quora::before { content: "\f6af"; } +.bi-quote::before { content: "\f6b0"; } +.bi-robot::before { content: "\f6b1"; } +.bi-send-check-fill::before { content: "\f6b2"; } +.bi-send-check::before { content: "\f6b3"; } +.bi-send-dash-fill::before { content: "\f6b4"; } +.bi-send-dash::before { content: "\f6b5"; } +.bi-send-exclamation-fill::before { content: "\f6b7"; } +.bi-send-exclamation::before { content: "\f6b8"; } +.bi-send-fill::before { content: "\f6b9"; } +.bi-send-plus-fill::before { content: "\f6ba"; } +.bi-send-plus::before { content: "\f6bb"; } +.bi-send-slash-fill::before { content: "\f6bc"; } +.bi-send-slash::before { content: "\f6bd"; } +.bi-send-x-fill::before { content: "\f6be"; } +.bi-send-x::before { content: "\f6bf"; } +.bi-send::before { content: "\f6c0"; } +.bi-steam::before { content: "\f6c1"; } +.bi-terminal-dash::before { content: "\f6c3"; } +.bi-terminal-plus::before { content: "\f6c4"; } +.bi-terminal-split::before { content: "\f6c5"; } +.bi-ticket-detailed-fill::before { content: "\f6c6"; } +.bi-ticket-detailed::before { content: "\f6c7"; } +.bi-ticket-fill::before { content: "\f6c8"; } +.bi-ticket-perforated-fill::before { content: "\f6c9"; } +.bi-ticket-perforated::before { content: "\f6ca"; } +.bi-ticket::before { content: "\f6cb"; } +.bi-tiktok::before { content: "\f6cc"; } +.bi-window-dash::before { content: "\f6cd"; } +.bi-window-desktop::before { content: "\f6ce"; } +.bi-window-fullscreen::before { content: "\f6cf"; } +.bi-window-plus::before { content: "\f6d0"; } +.bi-window-split::before { content: "\f6d1"; } +.bi-window-stack::before { content: "\f6d2"; } +.bi-window-x::before { content: "\f6d3"; } +.bi-xbox::before { content: "\f6d4"; } +.bi-ethernet::before { content: "\f6d5"; } +.bi-hdmi-fill::before { content: "\f6d6"; } +.bi-hdmi::before { content: "\f6d7"; } +.bi-usb-c-fill::before { content: "\f6d8"; } +.bi-usb-c::before { content: "\f6d9"; } +.bi-usb-fill::before { content: "\f6da"; } +.bi-usb-plug-fill::before { content: "\f6db"; } +.bi-usb-plug::before { content: "\f6dc"; } +.bi-usb-symbol::before { content: "\f6dd"; } +.bi-usb::before { content: "\f6de"; } +.bi-boombox-fill::before { content: "\f6df"; } +.bi-displayport::before { content: "\f6e1"; } +.bi-gpu-card::before { content: "\f6e2"; } +.bi-memory::before { content: "\f6e3"; } +.bi-modem-fill::before { content: "\f6e4"; } +.bi-modem::before { content: "\f6e5"; } +.bi-motherboard-fill::before { content: "\f6e6"; } +.bi-motherboard::before { content: "\f6e7"; } +.bi-optical-audio-fill::before { content: "\f6e8"; } +.bi-optical-audio::before { content: "\f6e9"; } +.bi-pci-card::before { content: "\f6ea"; } +.bi-router-fill::before { content: "\f6eb"; } +.bi-router::before { content: "\f6ec"; } +.bi-thunderbolt-fill::before { content: "\f6ef"; } +.bi-thunderbolt::before { content: "\f6f0"; } +.bi-usb-drive-fill::before { content: "\f6f1"; } +.bi-usb-drive::before { content: "\f6f2"; } +.bi-usb-micro-fill::before { content: "\f6f3"; } +.bi-usb-micro::before { content: "\f6f4"; } +.bi-usb-mini-fill::before { content: "\f6f5"; } +.bi-usb-mini::before { content: "\f6f6"; } +.bi-cloud-haze2::before { content: "\f6f7"; } +.bi-device-hdd-fill::before { content: "\f6f8"; } +.bi-device-hdd::before { content: "\f6f9"; } +.bi-device-ssd-fill::before { 
content: "\f6fa"; } +.bi-device-ssd::before { content: "\f6fb"; } +.bi-displayport-fill::before { content: "\f6fc"; } +.bi-mortarboard-fill::before { content: "\f6fd"; } +.bi-mortarboard::before { content: "\f6fe"; } +.bi-terminal-x::before { content: "\f6ff"; } +.bi-arrow-through-heart-fill::before { content: "\f700"; } +.bi-arrow-through-heart::before { content: "\f701"; } +.bi-badge-sd-fill::before { content: "\f702"; } +.bi-badge-sd::before { content: "\f703"; } +.bi-bag-heart-fill::before { content: "\f704"; } +.bi-bag-heart::before { content: "\f705"; } +.bi-balloon-fill::before { content: "\f706"; } +.bi-balloon-heart-fill::before { content: "\f707"; } +.bi-balloon-heart::before { content: "\f708"; } +.bi-balloon::before { content: "\f709"; } +.bi-box2-fill::before { content: "\f70a"; } +.bi-box2-heart-fill::before { content: "\f70b"; } +.bi-box2-heart::before { content: "\f70c"; } +.bi-box2::before { content: "\f70d"; } +.bi-braces-asterisk::before { content: "\f70e"; } +.bi-calendar-heart-fill::before { content: "\f70f"; } +.bi-calendar-heart::before { content: "\f710"; } +.bi-calendar2-heart-fill::before { content: "\f711"; } +.bi-calendar2-heart::before { content: "\f712"; } +.bi-chat-heart-fill::before { content: "\f713"; } +.bi-chat-heart::before { content: "\f714"; } +.bi-chat-left-heart-fill::before { content: "\f715"; } +.bi-chat-left-heart::before { content: "\f716"; } +.bi-chat-right-heart-fill::before { content: "\f717"; } +.bi-chat-right-heart::before { content: "\f718"; } +.bi-chat-square-heart-fill::before { content: "\f719"; } +.bi-chat-square-heart::before { content: "\f71a"; } +.bi-clipboard-check-fill::before { content: "\f71b"; } +.bi-clipboard-data-fill::before { content: "\f71c"; } +.bi-clipboard-fill::before { content: "\f71d"; } +.bi-clipboard-heart-fill::before { content: "\f71e"; } +.bi-clipboard-heart::before { content: "\f71f"; } +.bi-clipboard-minus-fill::before { content: "\f720"; } +.bi-clipboard-plus-fill::before { content: "\f721"; } +.bi-clipboard-pulse::before { content: "\f722"; } +.bi-clipboard-x-fill::before { content: "\f723"; } +.bi-clipboard2-check-fill::before { content: "\f724"; } +.bi-clipboard2-check::before { content: "\f725"; } +.bi-clipboard2-data-fill::before { content: "\f726"; } +.bi-clipboard2-data::before { content: "\f727"; } +.bi-clipboard2-fill::before { content: "\f728"; } +.bi-clipboard2-heart-fill::before { content: "\f729"; } +.bi-clipboard2-heart::before { content: "\f72a"; } +.bi-clipboard2-minus-fill::before { content: "\f72b"; } +.bi-clipboard2-minus::before { content: "\f72c"; } +.bi-clipboard2-plus-fill::before { content: "\f72d"; } +.bi-clipboard2-plus::before { content: "\f72e"; } +.bi-clipboard2-pulse-fill::before { content: "\f72f"; } +.bi-clipboard2-pulse::before { content: "\f730"; } +.bi-clipboard2-x-fill::before { content: "\f731"; } +.bi-clipboard2-x::before { content: "\f732"; } +.bi-clipboard2::before { content: "\f733"; } +.bi-emoji-kiss-fill::before { content: "\f734"; } +.bi-emoji-kiss::before { content: "\f735"; } +.bi-envelope-heart-fill::before { content: "\f736"; } +.bi-envelope-heart::before { content: "\f737"; } +.bi-envelope-open-heart-fill::before { content: "\f738"; } +.bi-envelope-open-heart::before { content: "\f739"; } +.bi-envelope-paper-fill::before { content: "\f73a"; } +.bi-envelope-paper-heart-fill::before { content: "\f73b"; } +.bi-envelope-paper-heart::before { content: "\f73c"; } +.bi-envelope-paper::before { content: "\f73d"; } +.bi-filetype-aac::before { content: "\f73e"; } 
+.bi-filetype-ai::before { content: "\f73f"; } +.bi-filetype-bmp::before { content: "\f740"; } +.bi-filetype-cs::before { content: "\f741"; } +.bi-filetype-css::before { content: "\f742"; } +.bi-filetype-csv::before { content: "\f743"; } +.bi-filetype-doc::before { content: "\f744"; } +.bi-filetype-docx::before { content: "\f745"; } +.bi-filetype-exe::before { content: "\f746"; } +.bi-filetype-gif::before { content: "\f747"; } +.bi-filetype-heic::before { content: "\f748"; } +.bi-filetype-html::before { content: "\f749"; } +.bi-filetype-java::before { content: "\f74a"; } +.bi-filetype-jpg::before { content: "\f74b"; } +.bi-filetype-js::before { content: "\f74c"; } +.bi-filetype-jsx::before { content: "\f74d"; } +.bi-filetype-key::before { content: "\f74e"; } +.bi-filetype-m4p::before { content: "\f74f"; } +.bi-filetype-md::before { content: "\f750"; } +.bi-filetype-mdx::before { content: "\f751"; } +.bi-filetype-mov::before { content: "\f752"; } +.bi-filetype-mp3::before { content: "\f753"; } +.bi-filetype-mp4::before { content: "\f754"; } +.bi-filetype-otf::before { content: "\f755"; } +.bi-filetype-pdf::before { content: "\f756"; } +.bi-filetype-php::before { content: "\f757"; } +.bi-filetype-png::before { content: "\f758"; } +.bi-filetype-ppt::before { content: "\f75a"; } +.bi-filetype-psd::before { content: "\f75b"; } +.bi-filetype-py::before { content: "\f75c"; } +.bi-filetype-raw::before { content: "\f75d"; } +.bi-filetype-rb::before { content: "\f75e"; } +.bi-filetype-sass::before { content: "\f75f"; } +.bi-filetype-scss::before { content: "\f760"; } +.bi-filetype-sh::before { content: "\f761"; } +.bi-filetype-svg::before { content: "\f762"; } +.bi-filetype-tiff::before { content: "\f763"; } +.bi-filetype-tsx::before { content: "\f764"; } +.bi-filetype-ttf::before { content: "\f765"; } +.bi-filetype-txt::before { content: "\f766"; } +.bi-filetype-wav::before { content: "\f767"; } +.bi-filetype-woff::before { content: "\f768"; } +.bi-filetype-xls::before { content: "\f76a"; } +.bi-filetype-xml::before { content: "\f76b"; } +.bi-filetype-yml::before { content: "\f76c"; } +.bi-heart-arrow::before { content: "\f76d"; } +.bi-heart-pulse-fill::before { content: "\f76e"; } +.bi-heart-pulse::before { content: "\f76f"; } +.bi-heartbreak-fill::before { content: "\f770"; } +.bi-heartbreak::before { content: "\f771"; } +.bi-hearts::before { content: "\f772"; } +.bi-hospital-fill::before { content: "\f773"; } +.bi-hospital::before { content: "\f774"; } +.bi-house-heart-fill::before { content: "\f775"; } +.bi-house-heart::before { content: "\f776"; } +.bi-incognito::before { content: "\f777"; } +.bi-magnet-fill::before { content: "\f778"; } +.bi-magnet::before { content: "\f779"; } +.bi-person-heart::before { content: "\f77a"; } +.bi-person-hearts::before { content: "\f77b"; } +.bi-phone-flip::before { content: "\f77c"; } +.bi-plugin::before { content: "\f77d"; } +.bi-postage-fill::before { content: "\f77e"; } +.bi-postage-heart-fill::before { content: "\f77f"; } +.bi-postage-heart::before { content: "\f780"; } +.bi-postage::before { content: "\f781"; } +.bi-postcard-fill::before { content: "\f782"; } +.bi-postcard-heart-fill::before { content: "\f783"; } +.bi-postcard-heart::before { content: "\f784"; } +.bi-postcard::before { content: "\f785"; } +.bi-search-heart-fill::before { content: "\f786"; } +.bi-search-heart::before { content: "\f787"; } +.bi-sliders2-vertical::before { content: "\f788"; } +.bi-sliders2::before { content: "\f789"; } +.bi-trash3-fill::before { content: "\f78a"; } 
+.bi-trash3::before { content: "\f78b"; } +.bi-valentine::before { content: "\f78c"; } +.bi-valentine2::before { content: "\f78d"; } +.bi-wrench-adjustable-circle-fill::before { content: "\f78e"; } +.bi-wrench-adjustable-circle::before { content: "\f78f"; } +.bi-wrench-adjustable::before { content: "\f790"; } +.bi-filetype-json::before { content: "\f791"; } +.bi-filetype-pptx::before { content: "\f792"; } +.bi-filetype-xlsx::before { content: "\f793"; } +.bi-1-circle-fill::before { content: "\f796"; } +.bi-1-circle::before { content: "\f797"; } +.bi-1-square-fill::before { content: "\f798"; } +.bi-1-square::before { content: "\f799"; } +.bi-2-circle-fill::before { content: "\f79c"; } +.bi-2-circle::before { content: "\f79d"; } +.bi-2-square-fill::before { content: "\f79e"; } +.bi-2-square::before { content: "\f79f"; } +.bi-3-circle-fill::before { content: "\f7a2"; } +.bi-3-circle::before { content: "\f7a3"; } +.bi-3-square-fill::before { content: "\f7a4"; } +.bi-3-square::before { content: "\f7a5"; } +.bi-4-circle-fill::before { content: "\f7a8"; } +.bi-4-circle::before { content: "\f7a9"; } +.bi-4-square-fill::before { content: "\f7aa"; } +.bi-4-square::before { content: "\f7ab"; } +.bi-5-circle-fill::before { content: "\f7ae"; } +.bi-5-circle::before { content: "\f7af"; } +.bi-5-square-fill::before { content: "\f7b0"; } +.bi-5-square::before { content: "\f7b1"; } +.bi-6-circle-fill::before { content: "\f7b4"; } +.bi-6-circle::before { content: "\f7b5"; } +.bi-6-square-fill::before { content: "\f7b6"; } +.bi-6-square::before { content: "\f7b7"; } +.bi-7-circle-fill::before { content: "\f7ba"; } +.bi-7-circle::before { content: "\f7bb"; } +.bi-7-square-fill::before { content: "\f7bc"; } +.bi-7-square::before { content: "\f7bd"; } +.bi-8-circle-fill::before { content: "\f7c0"; } +.bi-8-circle::before { content: "\f7c1"; } +.bi-8-square-fill::before { content: "\f7c2"; } +.bi-8-square::before { content: "\f7c3"; } +.bi-9-circle-fill::before { content: "\f7c6"; } +.bi-9-circle::before { content: "\f7c7"; } +.bi-9-square-fill::before { content: "\f7c8"; } +.bi-9-square::before { content: "\f7c9"; } +.bi-airplane-engines-fill::before { content: "\f7ca"; } +.bi-airplane-engines::before { content: "\f7cb"; } +.bi-airplane-fill::before { content: "\f7cc"; } +.bi-airplane::before { content: "\f7cd"; } +.bi-alexa::before { content: "\f7ce"; } +.bi-alipay::before { content: "\f7cf"; } +.bi-android::before { content: "\f7d0"; } +.bi-android2::before { content: "\f7d1"; } +.bi-box-fill::before { content: "\f7d2"; } +.bi-box-seam-fill::before { content: "\f7d3"; } +.bi-browser-chrome::before { content: "\f7d4"; } +.bi-browser-edge::before { content: "\f7d5"; } +.bi-browser-firefox::before { content: "\f7d6"; } +.bi-browser-safari::before { content: "\f7d7"; } +.bi-c-circle-fill::before { content: "\f7da"; } +.bi-c-circle::before { content: "\f7db"; } +.bi-c-square-fill::before { content: "\f7dc"; } +.bi-c-square::before { content: "\f7dd"; } +.bi-capsule-pill::before { content: "\f7de"; } +.bi-capsule::before { content: "\f7df"; } +.bi-car-front-fill::before { content: "\f7e0"; } +.bi-car-front::before { content: "\f7e1"; } +.bi-cassette-fill::before { content: "\f7e2"; } +.bi-cassette::before { content: "\f7e3"; } +.bi-cc-circle-fill::before { content: "\f7e6"; } +.bi-cc-circle::before { content: "\f7e7"; } +.bi-cc-square-fill::before { content: "\f7e8"; } +.bi-cc-square::before { content: "\f7e9"; } +.bi-cup-hot-fill::before { content: "\f7ea"; } +.bi-cup-hot::before { content: "\f7eb"; } 
+.bi-currency-rupee::before { content: "\f7ec"; } +.bi-dropbox::before { content: "\f7ed"; } +.bi-escape::before { content: "\f7ee"; } +.bi-fast-forward-btn-fill::before { content: "\f7ef"; } +.bi-fast-forward-btn::before { content: "\f7f0"; } +.bi-fast-forward-circle-fill::before { content: "\f7f1"; } +.bi-fast-forward-circle::before { content: "\f7f2"; } +.bi-fast-forward-fill::before { content: "\f7f3"; } +.bi-fast-forward::before { content: "\f7f4"; } +.bi-filetype-sql::before { content: "\f7f5"; } +.bi-fire::before { content: "\f7f6"; } +.bi-google-play::before { content: "\f7f7"; } +.bi-h-circle-fill::before { content: "\f7fa"; } +.bi-h-circle::before { content: "\f7fb"; } +.bi-h-square-fill::before { content: "\f7fc"; } +.bi-h-square::before { content: "\f7fd"; } +.bi-indent::before { content: "\f7fe"; } +.bi-lungs-fill::before { content: "\f7ff"; } +.bi-lungs::before { content: "\f800"; } +.bi-microsoft-teams::before { content: "\f801"; } +.bi-p-circle-fill::before { content: "\f804"; } +.bi-p-circle::before { content: "\f805"; } +.bi-p-square-fill::before { content: "\f806"; } +.bi-p-square::before { content: "\f807"; } +.bi-pass-fill::before { content: "\f808"; } +.bi-pass::before { content: "\f809"; } +.bi-prescription::before { content: "\f80a"; } +.bi-prescription2::before { content: "\f80b"; } +.bi-r-circle-fill::before { content: "\f80e"; } +.bi-r-circle::before { content: "\f80f"; } +.bi-r-square-fill::before { content: "\f810"; } +.bi-r-square::before { content: "\f811"; } +.bi-repeat-1::before { content: "\f812"; } +.bi-repeat::before { content: "\f813"; } +.bi-rewind-btn-fill::before { content: "\f814"; } +.bi-rewind-btn::before { content: "\f815"; } +.bi-rewind-circle-fill::before { content: "\f816"; } +.bi-rewind-circle::before { content: "\f817"; } +.bi-rewind-fill::before { content: "\f818"; } +.bi-rewind::before { content: "\f819"; } +.bi-train-freight-front-fill::before { content: "\f81a"; } +.bi-train-freight-front::before { content: "\f81b"; } +.bi-train-front-fill::before { content: "\f81c"; } +.bi-train-front::before { content: "\f81d"; } +.bi-train-lightrail-front-fill::before { content: "\f81e"; } +.bi-train-lightrail-front::before { content: "\f81f"; } +.bi-truck-front-fill::before { content: "\f820"; } +.bi-truck-front::before { content: "\f821"; } +.bi-ubuntu::before { content: "\f822"; } +.bi-unindent::before { content: "\f823"; } +.bi-unity::before { content: "\f824"; } +.bi-universal-access-circle::before { content: "\f825"; } +.bi-universal-access::before { content: "\f826"; } +.bi-virus::before { content: "\f827"; } +.bi-virus2::before { content: "\f828"; } +.bi-wechat::before { content: "\f829"; } +.bi-yelp::before { content: "\f82a"; } +.bi-sign-stop-fill::before { content: "\f82b"; } +.bi-sign-stop-lights-fill::before { content: "\f82c"; } +.bi-sign-stop-lights::before { content: "\f82d"; } +.bi-sign-stop::before { content: "\f82e"; } +.bi-sign-turn-left-fill::before { content: "\f82f"; } +.bi-sign-turn-left::before { content: "\f830"; } +.bi-sign-turn-right-fill::before { content: "\f831"; } +.bi-sign-turn-right::before { content: "\f832"; } +.bi-sign-turn-slight-left-fill::before { content: "\f833"; } +.bi-sign-turn-slight-left::before { content: "\f834"; } +.bi-sign-turn-slight-right-fill::before { content: "\f835"; } +.bi-sign-turn-slight-right::before { content: "\f836"; } +.bi-sign-yield-fill::before { content: "\f837"; } +.bi-sign-yield::before { content: "\f838"; } +.bi-ev-station-fill::before { content: "\f839"; } +.bi-ev-station::before { 
content: "\f83a"; } +.bi-fuel-pump-diesel-fill::before { content: "\f83b"; } +.bi-fuel-pump-diesel::before { content: "\f83c"; } +.bi-fuel-pump-fill::before { content: "\f83d"; } +.bi-fuel-pump::before { content: "\f83e"; } +.bi-0-circle-fill::before { content: "\f83f"; } +.bi-0-circle::before { content: "\f840"; } +.bi-0-square-fill::before { content: "\f841"; } +.bi-0-square::before { content: "\f842"; } +.bi-rocket-fill::before { content: "\f843"; } +.bi-rocket-takeoff-fill::before { content: "\f844"; } +.bi-rocket-takeoff::before { content: "\f845"; } +.bi-rocket::before { content: "\f846"; } +.bi-stripe::before { content: "\f847"; } +.bi-subscript::before { content: "\f848"; } +.bi-superscript::before { content: "\f849"; } +.bi-trello::before { content: "\f84a"; } +.bi-envelope-at-fill::before { content: "\f84b"; } +.bi-envelope-at::before { content: "\f84c"; } +.bi-regex::before { content: "\f84d"; } +.bi-text-wrap::before { content: "\f84e"; } +.bi-sign-dead-end-fill::before { content: "\f84f"; } +.bi-sign-dead-end::before { content: "\f850"; } +.bi-sign-do-not-enter-fill::before { content: "\f851"; } +.bi-sign-do-not-enter::before { content: "\f852"; } +.bi-sign-intersection-fill::before { content: "\f853"; } +.bi-sign-intersection-side-fill::before { content: "\f854"; } +.bi-sign-intersection-side::before { content: "\f855"; } +.bi-sign-intersection-t-fill::before { content: "\f856"; } +.bi-sign-intersection-t::before { content: "\f857"; } +.bi-sign-intersection-y-fill::before { content: "\f858"; } +.bi-sign-intersection-y::before { content: "\f859"; } +.bi-sign-intersection::before { content: "\f85a"; } +.bi-sign-merge-left-fill::before { content: "\f85b"; } +.bi-sign-merge-left::before { content: "\f85c"; } +.bi-sign-merge-right-fill::before { content: "\f85d"; } +.bi-sign-merge-right::before { content: "\f85e"; } +.bi-sign-no-left-turn-fill::before { content: "\f85f"; } +.bi-sign-no-left-turn::before { content: "\f860"; } +.bi-sign-no-parking-fill::before { content: "\f861"; } +.bi-sign-no-parking::before { content: "\f862"; } +.bi-sign-no-right-turn-fill::before { content: "\f863"; } +.bi-sign-no-right-turn::before { content: "\f864"; } +.bi-sign-railroad-fill::before { content: "\f865"; } +.bi-sign-railroad::before { content: "\f866"; } +.bi-building-add::before { content: "\f867"; } +.bi-building-check::before { content: "\f868"; } +.bi-building-dash::before { content: "\f869"; } +.bi-building-down::before { content: "\f86a"; } +.bi-building-exclamation::before { content: "\f86b"; } +.bi-building-fill-add::before { content: "\f86c"; } +.bi-building-fill-check::before { content: "\f86d"; } +.bi-building-fill-dash::before { content: "\f86e"; } +.bi-building-fill-down::before { content: "\f86f"; } +.bi-building-fill-exclamation::before { content: "\f870"; } +.bi-building-fill-gear::before { content: "\f871"; } +.bi-building-fill-lock::before { content: "\f872"; } +.bi-building-fill-slash::before { content: "\f873"; } +.bi-building-fill-up::before { content: "\f874"; } +.bi-building-fill-x::before { content: "\f875"; } +.bi-building-fill::before { content: "\f876"; } +.bi-building-gear::before { content: "\f877"; } +.bi-building-lock::before { content: "\f878"; } +.bi-building-slash::before { content: "\f879"; } +.bi-building-up::before { content: "\f87a"; } +.bi-building-x::before { content: "\f87b"; } +.bi-buildings-fill::before { content: "\f87c"; } +.bi-buildings::before { content: "\f87d"; } +.bi-bus-front-fill::before { content: "\f87e"; } +.bi-bus-front::before { content: 
"\f87f"; } +.bi-ev-front-fill::before { content: "\f880"; } +.bi-ev-front::before { content: "\f881"; } +.bi-globe-americas::before { content: "\f882"; } +.bi-globe-asia-australia::before { content: "\f883"; } +.bi-globe-central-south-asia::before { content: "\f884"; } +.bi-globe-europe-africa::before { content: "\f885"; } +.bi-house-add-fill::before { content: "\f886"; } +.bi-house-add::before { content: "\f887"; } +.bi-house-check-fill::before { content: "\f888"; } +.bi-house-check::before { content: "\f889"; } +.bi-house-dash-fill::before { content: "\f88a"; } +.bi-house-dash::before { content: "\f88b"; } +.bi-house-down-fill::before { content: "\f88c"; } +.bi-house-down::before { content: "\f88d"; } +.bi-house-exclamation-fill::before { content: "\f88e"; } +.bi-house-exclamation::before { content: "\f88f"; } +.bi-house-gear-fill::before { content: "\f890"; } +.bi-house-gear::before { content: "\f891"; } +.bi-house-lock-fill::before { content: "\f892"; } +.bi-house-lock::before { content: "\f893"; } +.bi-house-slash-fill::before { content: "\f894"; } +.bi-house-slash::before { content: "\f895"; } +.bi-house-up-fill::before { content: "\f896"; } +.bi-house-up::before { content: "\f897"; } +.bi-house-x-fill::before { content: "\f898"; } +.bi-house-x::before { content: "\f899"; } +.bi-person-add::before { content: "\f89a"; } +.bi-person-down::before { content: "\f89b"; } +.bi-person-exclamation::before { content: "\f89c"; } +.bi-person-fill-add::before { content: "\f89d"; } +.bi-person-fill-check::before { content: "\f89e"; } +.bi-person-fill-dash::before { content: "\f89f"; } +.bi-person-fill-down::before { content: "\f8a0"; } +.bi-person-fill-exclamation::before { content: "\f8a1"; } +.bi-person-fill-gear::before { content: "\f8a2"; } +.bi-person-fill-lock::before { content: "\f8a3"; } +.bi-person-fill-slash::before { content: "\f8a4"; } +.bi-person-fill-up::before { content: "\f8a5"; } +.bi-person-fill-x::before { content: "\f8a6"; } +.bi-person-gear::before { content: "\f8a7"; } +.bi-person-lock::before { content: "\f8a8"; } +.bi-person-slash::before { content: "\f8a9"; } +.bi-person-up::before { content: "\f8aa"; } +.bi-scooter::before { content: "\f8ab"; } +.bi-taxi-front-fill::before { content: "\f8ac"; } +.bi-taxi-front::before { content: "\f8ad"; } +.bi-amd::before { content: "\f8ae"; } +.bi-database-add::before { content: "\f8af"; } +.bi-database-check::before { content: "\f8b0"; } +.bi-database-dash::before { content: "\f8b1"; } +.bi-database-down::before { content: "\f8b2"; } +.bi-database-exclamation::before { content: "\f8b3"; } +.bi-database-fill-add::before { content: "\f8b4"; } +.bi-database-fill-check::before { content: "\f8b5"; } +.bi-database-fill-dash::before { content: "\f8b6"; } +.bi-database-fill-down::before { content: "\f8b7"; } +.bi-database-fill-exclamation::before { content: "\f8b8"; } +.bi-database-fill-gear::before { content: "\f8b9"; } +.bi-database-fill-lock::before { content: "\f8ba"; } +.bi-database-fill-slash::before { content: "\f8bb"; } +.bi-database-fill-up::before { content: "\f8bc"; } +.bi-database-fill-x::before { content: "\f8bd"; } +.bi-database-fill::before { content: "\f8be"; } +.bi-database-gear::before { content: "\f8bf"; } +.bi-database-lock::before { content: "\f8c0"; } +.bi-database-slash::before { content: "\f8c1"; } +.bi-database-up::before { content: "\f8c2"; } +.bi-database-x::before { content: "\f8c3"; } +.bi-database::before { content: "\f8c4"; } +.bi-houses-fill::before { content: "\f8c5"; } +.bi-houses::before { content: "\f8c6"; } 
+.bi-nvidia::before { content: "\f8c7"; } +.bi-person-vcard-fill::before { content: "\f8c8"; } +.bi-person-vcard::before { content: "\f8c9"; } +.bi-sina-weibo::before { content: "\f8ca"; } +.bi-tencent-qq::before { content: "\f8cb"; } +.bi-wikipedia::before { content: "\f8cc"; } +.bi-alphabet-uppercase::before { content: "\f2a5"; } +.bi-alphabet::before { content: "\f68a"; } +.bi-amazon::before { content: "\f68d"; } +.bi-arrows-collapse-vertical::before { content: "\f690"; } +.bi-arrows-expand-vertical::before { content: "\f695"; } +.bi-arrows-vertical::before { content: "\f698"; } +.bi-arrows::before { content: "\f6a2"; } +.bi-ban-fill::before { content: "\f6a3"; } +.bi-ban::before { content: "\f6b6"; } +.bi-bing::before { content: "\f6c2"; } +.bi-cake::before { content: "\f6e0"; } +.bi-cake2::before { content: "\f6ed"; } +.bi-cookie::before { content: "\f6ee"; } +.bi-copy::before { content: "\f759"; } +.bi-crosshair::before { content: "\f769"; } +.bi-crosshair2::before { content: "\f794"; } +.bi-emoji-astonished-fill::before { content: "\f795"; } +.bi-emoji-astonished::before { content: "\f79a"; } +.bi-emoji-grimace-fill::before { content: "\f79b"; } +.bi-emoji-grimace::before { content: "\f7a0"; } +.bi-emoji-grin-fill::before { content: "\f7a1"; } +.bi-emoji-grin::before { content: "\f7a6"; } +.bi-emoji-surprise-fill::before { content: "\f7a7"; } +.bi-emoji-surprise::before { content: "\f7ac"; } +.bi-emoji-tear-fill::before { content: "\f7ad"; } +.bi-emoji-tear::before { content: "\f7b2"; } +.bi-envelope-arrow-down-fill::before { content: "\f7b3"; } +.bi-envelope-arrow-down::before { content: "\f7b8"; } +.bi-envelope-arrow-up-fill::before { content: "\f7b9"; } +.bi-envelope-arrow-up::before { content: "\f7be"; } +.bi-feather::before { content: "\f7bf"; } +.bi-feather2::before { content: "\f7c4"; } +.bi-floppy-fill::before { content: "\f7c5"; } +.bi-floppy::before { content: "\f7d8"; } +.bi-floppy2-fill::before { content: "\f7d9"; } +.bi-floppy2::before { content: "\f7e4"; } +.bi-gitlab::before { content: "\f7e5"; } +.bi-highlighter::before { content: "\f7f8"; } +.bi-marker-tip::before { content: "\f802"; } +.bi-nvme-fill::before { content: "\f803"; } +.bi-nvme::before { content: "\f80c"; } +.bi-opencollective::before { content: "\f80d"; } +.bi-pci-card-network::before { content: "\f8cd"; } +.bi-pci-card-sound::before { content: "\f8ce"; } +.bi-radar::before { content: "\f8cf"; } +.bi-send-arrow-down-fill::before { content: "\f8d0"; } +.bi-send-arrow-down::before { content: "\f8d1"; } +.bi-send-arrow-up-fill::before { content: "\f8d2"; } +.bi-send-arrow-up::before { content: "\f8d3"; } +.bi-sim-slash-fill::before { content: "\f8d4"; } +.bi-sim-slash::before { content: "\f8d5"; } +.bi-sourceforge::before { content: "\f8d6"; } +.bi-substack::before { content: "\f8d7"; } +.bi-threads-fill::before { content: "\f8d8"; } +.bi-threads::before { content: "\f8d9"; } +.bi-transparency::before { content: "\f8da"; } +.bi-twitter-x::before { content: "\f8db"; } +.bi-type-h4::before { content: "\f8dc"; } +.bi-type-h5::before { content: "\f8dd"; } +.bi-type-h6::before { content: "\f8de"; } +.bi-backpack-fill::before { content: "\f8df"; } +.bi-backpack::before { content: "\f8e0"; } +.bi-backpack2-fill::before { content: "\f8e1"; } +.bi-backpack2::before { content: "\f8e2"; } +.bi-backpack3-fill::before { content: "\f8e3"; } +.bi-backpack3::before { content: "\f8e4"; } +.bi-backpack4-fill::before { content: "\f8e5"; } +.bi-backpack4::before { content: "\f8e6"; } +.bi-brilliance::before { content: 
"\f8e7"; } +.bi-cake-fill::before { content: "\f8e8"; } +.bi-cake2-fill::before { content: "\f8e9"; } +.bi-duffle-fill::before { content: "\f8ea"; } +.bi-duffle::before { content: "\f8eb"; } +.bi-exposure::before { content: "\f8ec"; } +.bi-gender-neuter::before { content: "\f8ed"; } +.bi-highlights::before { content: "\f8ee"; } +.bi-luggage-fill::before { content: "\f8ef"; } +.bi-luggage::before { content: "\f8f0"; } +.bi-mailbox-flag::before { content: "\f8f1"; } +.bi-mailbox2-flag::before { content: "\f8f2"; } +.bi-noise-reduction::before { content: "\f8f3"; } +.bi-passport-fill::before { content: "\f8f4"; } +.bi-passport::before { content: "\f8f5"; } +.bi-person-arms-up::before { content: "\f8f6"; } +.bi-person-raised-hand::before { content: "\f8f7"; } +.bi-person-standing-dress::before { content: "\f8f8"; } +.bi-person-standing::before { content: "\f8f9"; } +.bi-person-walking::before { content: "\f8fa"; } +.bi-person-wheelchair::before { content: "\f8fb"; } +.bi-shadows::before { content: "\f8fc"; } +.bi-suitcase-fill::before { content: "\f8fd"; } +.bi-suitcase-lg-fill::before { content: "\f8fe"; } +.bi-suitcase-lg::before { content: "\f8ff"; } +.bi-suitcase::before { content: "\f900"; } +.bi-suitcase2-fill::before { content: "\f901"; } +.bi-suitcase2::before { content: "\f902"; } +.bi-vignette::before { content: "\f903"; } diff --git a/site_libs/bootstrap/bootstrap-icons.woff b/site_libs/bootstrap/bootstrap-icons.woff new file mode 100644 index 000000000..dbeeb0556 Binary files /dev/null and b/site_libs/bootstrap/bootstrap-icons.woff differ diff --git a/site_libs/bootstrap/bootstrap.min.css b/site_libs/bootstrap/bootstrap.min.css new file mode 100644 index 000000000..9333d3375 --- /dev/null +++ b/site_libs/bootstrap/bootstrap.min.css @@ -0,0 +1,12 @@ +/*! 
+ * Bootstrap v5.3.1 (https://getbootstrap.com/) + * Copyright 2011-2023 The Bootstrap Authors + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */@import"https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@300;400;700&display=swap";:root,[data-bs-theme=light]{--bs-blue: #2780e3;--bs-indigo: #6610f2;--bs-purple: #613d7c;--bs-pink: #e83e8c;--bs-red: #ff0039;--bs-orange: #f0ad4e;--bs-yellow: #ff7518;--bs-green: #3fb618;--bs-teal: #20c997;--bs-cyan: #9954bb;--bs-black: #000;--bs-white: #fff;--bs-gray: #6c757d;--bs-gray-dark: #343a40;--bs-gray-100: #f8f9fa;--bs-gray-200: #e9ecef;--bs-gray-300: #dee2e6;--bs-gray-400: #ced4da;--bs-gray-500: #adb5bd;--bs-gray-600: #6c757d;--bs-gray-700: #495057;--bs-gray-800: #343a40;--bs-gray-900: #212529;--bs-default: #343a40;--bs-primary: #2780e3;--bs-secondary: #343a40;--bs-success: #3fb618;--bs-info: #9954bb;--bs-warning: #ff7518;--bs-danger: #ff0039;--bs-light: #f8f9fa;--bs-dark: #343a40;--bs-default-rgb: 52, 58, 64;--bs-primary-rgb: 39, 128, 227;--bs-secondary-rgb: 52, 58, 64;--bs-success-rgb: 63, 182, 24;--bs-info-rgb: 153, 84, 187;--bs-warning-rgb: 255, 117, 24;--bs-danger-rgb: 255, 0, 57;--bs-light-rgb: 248, 249, 250;--bs-dark-rgb: 52, 58, 64;--bs-primary-text-emphasis: #10335b;--bs-secondary-text-emphasis: #15171a;--bs-success-text-emphasis: #19490a;--bs-info-text-emphasis: #3d224b;--bs-warning-text-emphasis: #662f0a;--bs-danger-text-emphasis: #660017;--bs-light-text-emphasis: #495057;--bs-dark-text-emphasis: #495057;--bs-primary-bg-subtle: #d4e6f9;--bs-secondary-bg-subtle: #d6d8d9;--bs-success-bg-subtle: #d9f0d1;--bs-info-bg-subtle: #ebddf1;--bs-warning-bg-subtle: #ffe3d1;--bs-danger-bg-subtle: #ffccd7;--bs-light-bg-subtle: #fcfcfd;--bs-dark-bg-subtle: #ced4da;--bs-primary-border-subtle: #a9ccf4;--bs-secondary-border-subtle: #aeb0b3;--bs-success-border-subtle: #b2e2a3;--bs-info-border-subtle: #d6bbe4;--bs-warning-border-subtle: #ffc8a3;--bs-danger-border-subtle: #ff99b0;--bs-light-border-subtle: #e9ecef;--bs-dark-border-subtle: #adb5bd;--bs-white-rgb: 255, 255, 255;--bs-black-rgb: 0, 0, 0;--bs-font-sans-serif: "Source Sans Pro", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";--bs-font-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;--bs-gradient: linear-gradient(180deg, rgba(255, 255, 255, 0.15), rgba(255, 255, 255, 0));--bs-root-font-size: 17px;--bs-body-font-family: "Source Sans Pro", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";--bs-body-font-size:1rem;--bs-body-font-weight: 400;--bs-body-line-height: 1.5;--bs-body-color: #343a40;--bs-body-color-rgb: 52, 58, 64;--bs-body-bg: #fff;--bs-body-bg-rgb: 255, 255, 255;--bs-emphasis-color: #000;--bs-emphasis-color-rgb: 0, 0, 0;--bs-secondary-color: rgba(52, 58, 64, 0.75);--bs-secondary-color-rgb: 52, 58, 64;--bs-secondary-bg: #e9ecef;--bs-secondary-bg-rgb: 233, 236, 239;--bs-tertiary-color: rgba(52, 58, 64, 0.5);--bs-tertiary-color-rgb: 52, 58, 64;--bs-tertiary-bg: #f8f9fa;--bs-tertiary-bg-rgb: 248, 249, 250;--bs-heading-color: inherit;--bs-link-color: #2761e3;--bs-link-color-rgb: 39, 97, 227;--bs-link-decoration: underline;--bs-link-hover-color: #1f4eb6;--bs-link-hover-color-rgb: 31, 78, 182;--bs-code-color: #7d12ba;--bs-highlight-bg: #ffe3d1;--bs-border-width: 1px;--bs-border-style: solid;--bs-border-color: 
#dee2e6;--bs-border-color-translucent: rgba(0, 0, 0, 0.175);--bs-border-radius: 0.25rem;--bs-border-radius-sm: 0.2em;--bs-border-radius-lg: 0.5rem;--bs-border-radius-xl: 1rem;--bs-border-radius-xxl: 2rem;--bs-border-radius-2xl: var(--bs-border-radius-xxl);--bs-border-radius-pill: 50rem;--bs-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15);--bs-box-shadow-sm: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);--bs-box-shadow-lg: 0 1rem 3rem rgba(0, 0, 0, 0.175);--bs-box-shadow-inset: inset 0 1px 2px rgba(0, 0, 0, 0.075);--bs-focus-ring-width: 0.25rem;--bs-focus-ring-opacity: 0.25;--bs-focus-ring-color: rgba(39, 128, 227, 0.25);--bs-form-valid-color: #3fb618;--bs-form-valid-border-color: #3fb618;--bs-form-invalid-color: #ff0039;--bs-form-invalid-border-color: #ff0039}[data-bs-theme=dark]{color-scheme:dark;--bs-body-color: #dee2e6;--bs-body-color-rgb: 222, 226, 230;--bs-body-bg: #212529;--bs-body-bg-rgb: 33, 37, 41;--bs-emphasis-color: #fff;--bs-emphasis-color-rgb: 255, 255, 255;--bs-secondary-color: rgba(222, 226, 230, 0.75);--bs-secondary-color-rgb: 222, 226, 230;--bs-secondary-bg: #343a40;--bs-secondary-bg-rgb: 52, 58, 64;--bs-tertiary-color: rgba(222, 226, 230, 0.5);--bs-tertiary-color-rgb: 222, 226, 230;--bs-tertiary-bg: #2b3035;--bs-tertiary-bg-rgb: 43, 48, 53;--bs-primary-text-emphasis: #7db3ee;--bs-secondary-text-emphasis: #85898c;--bs-success-text-emphasis: #8cd374;--bs-info-text-emphasis: #c298d6;--bs-warning-text-emphasis: #ffac74;--bs-danger-text-emphasis: #ff6688;--bs-light-text-emphasis: #f8f9fa;--bs-dark-text-emphasis: #dee2e6;--bs-primary-bg-subtle: #081a2d;--bs-secondary-bg-subtle: #0a0c0d;--bs-success-bg-subtle: #0d2405;--bs-info-bg-subtle: #1f1125;--bs-warning-bg-subtle: #331705;--bs-danger-bg-subtle: #33000b;--bs-light-bg-subtle: #343a40;--bs-dark-bg-subtle: #1a1d20;--bs-primary-border-subtle: #174d88;--bs-secondary-border-subtle: #1f2326;--bs-success-border-subtle: #266d0e;--bs-info-border-subtle: #5c3270;--bs-warning-border-subtle: #99460e;--bs-danger-border-subtle: #990022;--bs-light-border-subtle: #495057;--bs-dark-border-subtle: #343a40;--bs-heading-color: inherit;--bs-link-color: #7db3ee;--bs-link-hover-color: #97c2f1;--bs-link-color-rgb: 125, 179, 238;--bs-link-hover-color-rgb: 151, 194, 241;--bs-code-color: white;--bs-border-color: #495057;--bs-border-color-translucent: rgba(255, 255, 255, 0.15);--bs-form-valid-color: #8cd374;--bs-form-valid-border-color: #8cd374;--bs-form-invalid-color: #ff6688;--bs-form-invalid-border-color: #ff6688}*,*::before,*::after{box-sizing:border-box}:root{font-size:var(--bs-root-font-size)}body{margin:0;font-family:var(--bs-body-font-family);font-size:var(--bs-body-font-size);font-weight:var(--bs-body-font-weight);line-height:var(--bs-body-line-height);color:var(--bs-body-color);text-align:var(--bs-body-text-align);background-color:var(--bs-body-bg);-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:rgba(0,0,0,0)}hr{margin:1rem 0;color:inherit;border:0;border-top:1px solid;opacity:.25}h6,.h6,h5,.h5,h4,.h4,h3,.h3,h2,.h2,h1,.h1{margin-top:0;margin-bottom:.5rem;font-weight:400;line-height:1.2;color:var(--bs-heading-color)}h1,.h1{font-size:calc(1.325rem + 0.9vw)}@media(min-width: 1200px){h1,.h1{font-size:2rem}}h2,.h2{font-size:calc(1.29rem + 0.48vw)}@media(min-width: 1200px){h2,.h2{font-size:1.65rem}}h3,.h3{font-size:calc(1.27rem + 0.24vw)}@media(min-width: 1200px){h3,.h3{font-size:1.45rem}}h4,.h4{font-size:1.25rem}h5,.h5{font-size:1.1rem}h6,.h6{font-size:1rem}p{margin-top:0;margin-bottom:1rem}abbr[title]{text-decoration:underline 
dotted;-webkit-text-decoration:underline dotted;-moz-text-decoration:underline dotted;-ms-text-decoration:underline dotted;-o-text-decoration:underline dotted;cursor:help;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul{padding-left:2rem}ol,ul,dl{margin-top:0;margin-bottom:1rem}ol ol,ul ul,ol ul,ul ol{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem;padding:.625rem 1.25rem;border-left:.25rem solid #e9ecef}blockquote p:last-child,blockquote ul:last-child,blockquote ol:last-child{margin-bottom:0}b,strong{font-weight:bolder}small,.small{font-size:0.875em}mark,.mark{padding:.1875em;background-color:var(--bs-highlight-bg)}sub,sup{position:relative;font-size:0.75em;line-height:0;vertical-align:baseline}sub{bottom:-0.25em}sup{top:-0.5em}a{color:rgba(var(--bs-link-color-rgb), var(--bs-link-opacity, 1));text-decoration:underline;-webkit-text-decoration:underline;-moz-text-decoration:underline;-ms-text-decoration:underline;-o-text-decoration:underline}a:hover{--bs-link-color-rgb: var(--bs-link-hover-color-rgb)}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}pre,code,kbd,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:1em}pre{display:block;margin-top:0;margin-bottom:1rem;overflow:auto;font-size:0.875em;color:#000;background-color:#f8f9fa;padding:.5rem;border:1px solid var(--bs-border-color, #dee2e6)}pre code{background-color:rgba(0,0,0,0);font-size:inherit;color:inherit;word-break:normal}code{font-size:0.875em;color:var(--bs-code-color);background-color:#f8f9fa;padding:.125rem .25rem;word-wrap:break-word}a>code{color:inherit}kbd{padding:.4rem .4rem;font-size:0.875em;color:#fff;background-color:#343a40}kbd kbd{padding:0;font-size:1em}figure{margin:0 0 1rem}img,svg{vertical-align:middle}table{caption-side:bottom;border-collapse:collapse}caption{padding-top:.5rem;padding-bottom:.5rem;color:rgba(52,58,64,.75);text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}thead,tbody,tfoot,tr,td,th{border-color:inherit;border-style:solid;border-width:0}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}input,button,select,optgroup,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]:not([type=date]):not([type=datetime-local]):not([type=month]):not([type=week]):not([type=time])::-webkit-calendar-picker-indicator{display:none !important}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button}button:not(:disabled),[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled){cursor:pointer}::-moz-focus-inner{padding:0;border-style:none}textarea{resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{float:left;width:100%;padding:0;margin-bottom:.5rem;font-size:calc(1.275rem + 0.3vw);line-height:inherit}@media(min-width: 
1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-text,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::file-selector-button{font:inherit;-webkit-appearance:button}output{display:inline-block}iframe{border:0}summary{display:list-item;cursor:pointer}progress{vertical-align:baseline}[hidden]{display:none !important}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:calc(1.625rem + 4.5vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-1{font-size:5rem}}.display-2{font-size:calc(1.575rem + 3.9vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-2{font-size:4.5rem}}.display-3{font-size:calc(1.525rem + 3.3vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-3{font-size:4rem}}.display-4{font-size:calc(1.475rem + 2.7vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-4{font-size:3.5rem}}.display-5{font-size:calc(1.425rem + 2.1vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-5{font-size:3rem}}.display-6{font-size:calc(1.375rem + 1.5vw);font-weight:300;line-height:1.2}@media(min-width: 1200px){.display-6{font-size:2.5rem}}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:0.875em;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote>:last-child{margin-bottom:0}.blockquote-footer{margin-top:-1rem;margin-bottom:1rem;font-size:0.875em;color:#6c757d}.blockquote-footer::before{content:"— "}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:0.875em;color:rgba(52,58,64,.75)}.container,.container-fluid,.container-xxl,.container-xl,.container-lg,.container-md,.container-sm{--bs-gutter-x: 1.5rem;--bs-gutter-y: 0;width:100%;padding-right:calc(var(--bs-gutter-x)*.5);padding-left:calc(var(--bs-gutter-x)*.5);margin-right:auto;margin-left:auto}@media(min-width: 576px){.container-sm,.container{max-width:540px}}@media(min-width: 768px){.container-md,.container-sm,.container{max-width:720px}}@media(min-width: 992px){.container-lg,.container-md,.container-sm,.container{max-width:960px}}@media(min-width: 1200px){.container-xl,.container-lg,.container-md,.container-sm,.container{max-width:1140px}}@media(min-width: 1400px){.container-xxl,.container-xl,.container-lg,.container-md,.container-sm,.container{max-width:1320px}}:root{--bs-breakpoint-xs: 0;--bs-breakpoint-sm: 576px;--bs-breakpoint-md: 768px;--bs-breakpoint-lg: 992px;--bs-breakpoint-xl: 1200px;--bs-breakpoint-xxl: 1400px}.grid{display:grid;grid-template-rows:repeat(var(--bs-rows, 1), 1fr);grid-template-columns:repeat(var(--bs-columns, 12), 1fr);gap:var(--bs-gap, 1.5rem)}.grid .g-col-1{grid-column:auto/span 1}.grid .g-col-2{grid-column:auto/span 2}.grid .g-col-3{grid-column:auto/span 3}.grid .g-col-4{grid-column:auto/span 4}.grid .g-col-5{grid-column:auto/span 5}.grid .g-col-6{grid-column:auto/span 
6}.grid .g-col-7{grid-column:auto/span 7}.grid .g-col-8{grid-column:auto/span 8}.grid .g-col-9{grid-column:auto/span 9}.grid .g-col-10{grid-column:auto/span 10}.grid .g-col-11{grid-column:auto/span 11}.grid .g-col-12{grid-column:auto/span 12}.grid .g-start-1{grid-column-start:1}.grid .g-start-2{grid-column-start:2}.grid .g-start-3{grid-column-start:3}.grid .g-start-4{grid-column-start:4}.grid .g-start-5{grid-column-start:5}.grid .g-start-6{grid-column-start:6}.grid .g-start-7{grid-column-start:7}.grid .g-start-8{grid-column-start:8}.grid .g-start-9{grid-column-start:9}.grid .g-start-10{grid-column-start:10}.grid .g-start-11{grid-column-start:11}@media(min-width: 576px){.grid .g-col-sm-1{grid-column:auto/span 1}.grid .g-col-sm-2{grid-column:auto/span 2}.grid .g-col-sm-3{grid-column:auto/span 3}.grid .g-col-sm-4{grid-column:auto/span 4}.grid .g-col-sm-5{grid-column:auto/span 5}.grid .g-col-sm-6{grid-column:auto/span 6}.grid .g-col-sm-7{grid-column:auto/span 7}.grid .g-col-sm-8{grid-column:auto/span 8}.grid .g-col-sm-9{grid-column:auto/span 9}.grid .g-col-sm-10{grid-column:auto/span 10}.grid .g-col-sm-11{grid-column:auto/span 11}.grid .g-col-sm-12{grid-column:auto/span 12}.grid .g-start-sm-1{grid-column-start:1}.grid .g-start-sm-2{grid-column-start:2}.grid .g-start-sm-3{grid-column-start:3}.grid .g-start-sm-4{grid-column-start:4}.grid .g-start-sm-5{grid-column-start:5}.grid .g-start-sm-6{grid-column-start:6}.grid .g-start-sm-7{grid-column-start:7}.grid .g-start-sm-8{grid-column-start:8}.grid .g-start-sm-9{grid-column-start:9}.grid .g-start-sm-10{grid-column-start:10}.grid .g-start-sm-11{grid-column-start:11}}@media(min-width: 768px){.grid .g-col-md-1{grid-column:auto/span 1}.grid .g-col-md-2{grid-column:auto/span 2}.grid .g-col-md-3{grid-column:auto/span 3}.grid .g-col-md-4{grid-column:auto/span 4}.grid .g-col-md-5{grid-column:auto/span 5}.grid .g-col-md-6{grid-column:auto/span 6}.grid .g-col-md-7{grid-column:auto/span 7}.grid .g-col-md-8{grid-column:auto/span 8}.grid .g-col-md-9{grid-column:auto/span 9}.grid .g-col-md-10{grid-column:auto/span 10}.grid .g-col-md-11{grid-column:auto/span 11}.grid .g-col-md-12{grid-column:auto/span 12}.grid .g-start-md-1{grid-column-start:1}.grid .g-start-md-2{grid-column-start:2}.grid .g-start-md-3{grid-column-start:3}.grid .g-start-md-4{grid-column-start:4}.grid .g-start-md-5{grid-column-start:5}.grid .g-start-md-6{grid-column-start:6}.grid .g-start-md-7{grid-column-start:7}.grid .g-start-md-8{grid-column-start:8}.grid .g-start-md-9{grid-column-start:9}.grid .g-start-md-10{grid-column-start:10}.grid .g-start-md-11{grid-column-start:11}}@media(min-width: 992px){.grid .g-col-lg-1{grid-column:auto/span 1}.grid .g-col-lg-2{grid-column:auto/span 2}.grid .g-col-lg-3{grid-column:auto/span 3}.grid .g-col-lg-4{grid-column:auto/span 4}.grid .g-col-lg-5{grid-column:auto/span 5}.grid .g-col-lg-6{grid-column:auto/span 6}.grid .g-col-lg-7{grid-column:auto/span 7}.grid .g-col-lg-8{grid-column:auto/span 8}.grid .g-col-lg-9{grid-column:auto/span 9}.grid .g-col-lg-10{grid-column:auto/span 10}.grid .g-col-lg-11{grid-column:auto/span 11}.grid .g-col-lg-12{grid-column:auto/span 12}.grid .g-start-lg-1{grid-column-start:1}.grid .g-start-lg-2{grid-column-start:2}.grid .g-start-lg-3{grid-column-start:3}.grid .g-start-lg-4{grid-column-start:4}.grid .g-start-lg-5{grid-column-start:5}.grid .g-start-lg-6{grid-column-start:6}.grid .g-start-lg-7{grid-column-start:7}.grid .g-start-lg-8{grid-column-start:8}.grid .g-start-lg-9{grid-column-start:9}.grid 
.g-start-lg-10{grid-column-start:10}.grid .g-start-lg-11{grid-column-start:11}}@media(min-width: 1200px){.grid .g-col-xl-1{grid-column:auto/span 1}.grid .g-col-xl-2{grid-column:auto/span 2}.grid .g-col-xl-3{grid-column:auto/span 3}.grid .g-col-xl-4{grid-column:auto/span 4}.grid .g-col-xl-5{grid-column:auto/span 5}.grid .g-col-xl-6{grid-column:auto/span 6}.grid .g-col-xl-7{grid-column:auto/span 7}.grid .g-col-xl-8{grid-column:auto/span 8}.grid .g-col-xl-9{grid-column:auto/span 9}.grid .g-col-xl-10{grid-column:auto/span 10}.grid .g-col-xl-11{grid-column:auto/span 11}.grid .g-col-xl-12{grid-column:auto/span 12}.grid .g-start-xl-1{grid-column-start:1}.grid .g-start-xl-2{grid-column-start:2}.grid .g-start-xl-3{grid-column-start:3}.grid .g-start-xl-4{grid-column-start:4}.grid .g-start-xl-5{grid-column-start:5}.grid .g-start-xl-6{grid-column-start:6}.grid .g-start-xl-7{grid-column-start:7}.grid .g-start-xl-8{grid-column-start:8}.grid .g-start-xl-9{grid-column-start:9}.grid .g-start-xl-10{grid-column-start:10}.grid .g-start-xl-11{grid-column-start:11}}@media(min-width: 1400px){.grid .g-col-xxl-1{grid-column:auto/span 1}.grid .g-col-xxl-2{grid-column:auto/span 2}.grid .g-col-xxl-3{grid-column:auto/span 3}.grid .g-col-xxl-4{grid-column:auto/span 4}.grid .g-col-xxl-5{grid-column:auto/span 5}.grid .g-col-xxl-6{grid-column:auto/span 6}.grid .g-col-xxl-7{grid-column:auto/span 7}.grid .g-col-xxl-8{grid-column:auto/span 8}.grid .g-col-xxl-9{grid-column:auto/span 9}.grid .g-col-xxl-10{grid-column:auto/span 10}.grid .g-col-xxl-11{grid-column:auto/span 11}.grid .g-col-xxl-12{grid-column:auto/span 12}.grid .g-start-xxl-1{grid-column-start:1}.grid .g-start-xxl-2{grid-column-start:2}.grid .g-start-xxl-3{grid-column-start:3}.grid .g-start-xxl-4{grid-column-start:4}.grid .g-start-xxl-5{grid-column-start:5}.grid .g-start-xxl-6{grid-column-start:6}.grid .g-start-xxl-7{grid-column-start:7}.grid .g-start-xxl-8{grid-column-start:8}.grid .g-start-xxl-9{grid-column-start:9}.grid .g-start-xxl-10{grid-column-start:10}.grid .g-start-xxl-11{grid-column-start:11}}.table{--bs-table-color-type: initial;--bs-table-bg-type: initial;--bs-table-color-state: initial;--bs-table-bg-state: initial;--bs-table-color: #343a40;--bs-table-bg: #fff;--bs-table-border-color: #dee2e6;--bs-table-accent-bg: transparent;--bs-table-striped-color: #343a40;--bs-table-striped-bg: rgba(0, 0, 0, 0.05);--bs-table-active-color: #343a40;--bs-table-active-bg: rgba(0, 0, 0, 0.1);--bs-table-hover-color: #343a40;--bs-table-hover-bg: rgba(0, 0, 0, 0.075);width:100%;margin-bottom:1rem;vertical-align:top;border-color:var(--bs-table-border-color)}.table>:not(caption)>*>*{padding:.5rem .5rem;color:var(--bs-table-color-state, var(--bs-table-color-type, var(--bs-table-color)));background-color:var(--bs-table-bg);border-bottom-width:1px;box-shadow:inset 0 0 0 9999px var(--bs-table-bg-state, var(--bs-table-bg-type, var(--bs-table-accent-bg)))}.table>tbody{vertical-align:inherit}.table>thead{vertical-align:bottom}.table-group-divider{border-top:calc(1px*2) solid #b2bac1}.caption-top{caption-side:top}.table-sm>:not(caption)>*>*{padding:.25rem .25rem}.table-bordered>:not(caption)>*{border-width:1px 0}.table-bordered>:not(caption)>*>*{border-width:0 1px}.table-borderless>:not(caption)>*>*{border-bottom-width:0}.table-borderless>:not(:first-child){border-top-width:0}.table-striped>tbody>tr:nth-of-type(odd)>*{--bs-table-color-type: var(--bs-table-striped-color);--bs-table-bg-type: 
var(--bs-table-striped-bg)}.table-striped-columns>:not(caption)>tr>:nth-child(even){--bs-table-color-type: var(--bs-table-striped-color);--bs-table-bg-type: var(--bs-table-striped-bg)}.table-active{--bs-table-color-state: var(--bs-table-active-color);--bs-table-bg-state: var(--bs-table-active-bg)}.table-hover>tbody>tr:hover>*{--bs-table-color-state: var(--bs-table-hover-color);--bs-table-bg-state: var(--bs-table-hover-bg)}.table-primary{--bs-table-color: #000;--bs-table-bg: #d4e6f9;--bs-table-border-color: #bfcfe0;--bs-table-striped-bg: #c9dbed;--bs-table-striped-color: #000;--bs-table-active-bg: #bfcfe0;--bs-table-active-color: #000;--bs-table-hover-bg: #c4d5e6;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-secondary{--bs-table-color: #000;--bs-table-bg: #d6d8d9;--bs-table-border-color: #c1c2c3;--bs-table-striped-bg: #cbcdce;--bs-table-striped-color: #000;--bs-table-active-bg: #c1c2c3;--bs-table-active-color: #000;--bs-table-hover-bg: #c6c8c9;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-success{--bs-table-color: #000;--bs-table-bg: #d9f0d1;--bs-table-border-color: #c3d8bc;--bs-table-striped-bg: #cee4c7;--bs-table-striped-color: #000;--bs-table-active-bg: #c3d8bc;--bs-table-active-color: #000;--bs-table-hover-bg: #c9dec1;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-info{--bs-table-color: #000;--bs-table-bg: #ebddf1;--bs-table-border-color: #d4c7d9;--bs-table-striped-bg: #dfd2e5;--bs-table-striped-color: #000;--bs-table-active-bg: #d4c7d9;--bs-table-active-color: #000;--bs-table-hover-bg: #d9ccdf;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-warning{--bs-table-color: #000;--bs-table-bg: #ffe3d1;--bs-table-border-color: #e6ccbc;--bs-table-striped-bg: #f2d8c7;--bs-table-striped-color: #000;--bs-table-active-bg: #e6ccbc;--bs-table-active-color: #000;--bs-table-hover-bg: #ecd2c1;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-danger{--bs-table-color: #000;--bs-table-bg: #ffccd7;--bs-table-border-color: #e6b8c2;--bs-table-striped-bg: #f2c2cc;--bs-table-striped-color: #000;--bs-table-active-bg: #e6b8c2;--bs-table-active-color: #000;--bs-table-hover-bg: #ecbdc7;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-light{--bs-table-color: #000;--bs-table-bg: #f8f9fa;--bs-table-border-color: #dfe0e1;--bs-table-striped-bg: #ecedee;--bs-table-striped-color: #000;--bs-table-active-bg: #dfe0e1;--bs-table-active-color: #000;--bs-table-hover-bg: #e5e6e7;--bs-table-hover-color: #000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-dark{--bs-table-color: #fff;--bs-table-bg: #343a40;--bs-table-border-color: #484e53;--bs-table-striped-bg: #3e444a;--bs-table-striped-color: #fff;--bs-table-active-bg: #484e53;--bs-table-active-color: #fff;--bs-table-hover-bg: #43494e;--bs-table-hover-color: #fff;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-responsive{overflow-x:auto;-webkit-overflow-scrolling:touch}@media(max-width: 575.98px){.table-responsive-sm{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media(max-width: 767.98px){.table-responsive-md{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media(max-width: 991.98px){.table-responsive-lg{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media(max-width: 
1199.98px){.table-responsive-xl{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media(max-width: 1399.98px){.table-responsive-xxl{overflow-x:auto;-webkit-overflow-scrolling:touch}}.form-label,.shiny-input-container .control-label{margin-bottom:.5rem}.col-form-label{padding-top:calc(0.375rem + 1px);padding-bottom:calc(0.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(0.5rem + 1px);padding-bottom:calc(0.5rem + 1px);font-size:1.25rem}.col-form-label-sm{padding-top:calc(0.25rem + 1px);padding-bottom:calc(0.25rem + 1px);font-size:0.875rem}.form-text{margin-top:.25rem;font-size:0.875em;color:rgba(52,58,64,.75)}.form-control{display:block;width:100%;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#343a40;appearance:none;-webkit-appearance:none;-moz-appearance:none;-ms-appearance:none;-o-appearance:none;background-color:#fff;background-clip:padding-box;border:1px solid #dee2e6;border-radius:0;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-control{transition:none}}.form-control[type=file]{overflow:hidden}.form-control[type=file]:not(:disabled):not([readonly]){cursor:pointer}.form-control:focus{color:#343a40;background-color:#fff;border-color:#93c0f1;outline:0;box-shadow:0 0 0 .25rem rgba(39,128,227,.25)}.form-control::-webkit-date-and-time-value{min-width:85px;height:1.5em;margin:0}.form-control::-webkit-datetime-edit{display:block;padding:0}.form-control::placeholder{color:rgba(52,58,64,.75);opacity:1}.form-control:disabled{background-color:#e9ecef;opacity:1}.form-control::file-selector-button{padding:.375rem .75rem;margin:-0.375rem -0.75rem;margin-inline-end:.75rem;color:#343a40;background-color:#f8f9fa;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-control::file-selector-button{transition:none}}.form-control:hover:not(:disabled):not([readonly])::file-selector-button{background-color:#e9ecef}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;line-height:1.5;color:#343a40;background-color:rgba(0,0,0,0);border:solid rgba(0,0,0,0);border-width:1px 0}.form-control-plaintext:focus{outline:0}.form-control-plaintext.form-control-sm,.form-control-plaintext.form-control-lg{padding-right:0;padding-left:0}.form-control-sm{min-height:calc(1.5em + 0.5rem + calc(1px * 2));padding:.25rem .5rem;font-size:0.875rem}.form-control-sm::file-selector-button{padding:.25rem .5rem;margin:-0.25rem -0.5rem;margin-inline-end:.5rem}.form-control-lg{min-height:calc(1.5em + 1rem + calc(1px * 2));padding:.5rem 1rem;font-size:1.25rem}.form-control-lg::file-selector-button{padding:.5rem 1rem;margin:-0.5rem -1rem;margin-inline-end:1rem}textarea.form-control{min-height:calc(1.5em + 0.75rem + calc(1px * 2))}textarea.form-control-sm{min-height:calc(1.5em + 0.5rem + calc(1px * 2))}textarea.form-control-lg{min-height:calc(1.5em + 1rem + calc(1px * 2))}.form-control-color{width:3rem;height:calc(1.5em + 0.75rem + calc(1px * 2));padding:.375rem}.form-control-color:not(:disabled):not([readonly]){cursor:pointer}.form-control-color::-moz-color-swatch{border:0 !important}.form-control-color::-webkit-color-swatch{border:0 !important}.form-control-color.form-control-sm{height:calc(1.5em + 0.5rem + calc(1px * 
2))}.form-control-color.form-control-lg{height:calc(1.5em + 1rem + calc(1px * 2))}.form-select{--bs-form-select-bg-img: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e");display:block;width:100%;padding:.375rem 2.25rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#343a40;appearance:none;-webkit-appearance:none;-moz-appearance:none;-ms-appearance:none;-o-appearance:none;background-color:#fff;background-image:var(--bs-form-select-bg-img),var(--bs-form-select-bg-icon, none);background-repeat:no-repeat;background-position:right .75rem center;background-size:16px 12px;border:1px solid #dee2e6;border-radius:0;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-select{transition:none}}.form-select:focus{border-color:#93c0f1;outline:0;box-shadow:0 0 0 .25rem rgba(39,128,227,.25)}.form-select[multiple],.form-select[size]:not([size="1"]){padding-right:.75rem;background-image:none}.form-select:disabled{background-color:#e9ecef}.form-select:-moz-focusring{color:rgba(0,0,0,0);text-shadow:0 0 0 #343a40}.form-select-sm{padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:0.875rem}.form-select-lg{padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem}[data-bs-theme=dark] .form-select{--bs-form-select-bg-img: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23dee2e6' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e")}.form-check,.shiny-input-container .checkbox,.shiny-input-container .radio{display:block;min-height:1.5rem;padding-left:0;margin-bottom:.125rem}.form-check .form-check-input,.form-check .shiny-input-container .checkbox input,.form-check .shiny-input-container .radio input,.shiny-input-container .checkbox .form-check-input,.shiny-input-container .checkbox .shiny-input-container .checkbox input,.shiny-input-container .checkbox .shiny-input-container .radio input,.shiny-input-container .radio .form-check-input,.shiny-input-container .radio .shiny-input-container .checkbox input,.shiny-input-container .radio .shiny-input-container .radio input{float:left;margin-left:0}.form-check-reverse{padding-right:0;padding-left:0;text-align:right}.form-check-reverse .form-check-input{float:right;margin-right:0;margin-left:0}.form-check-input,.shiny-input-container .checkbox input,.shiny-input-container .checkbox-inline input,.shiny-input-container .radio input,.shiny-input-container .radio-inline input{--bs-form-check-bg: #fff;width:1em;height:1em;margin-top:.25em;vertical-align:top;appearance:none;-webkit-appearance:none;-moz-appearance:none;-ms-appearance:none;-o-appearance:none;background-color:var(--bs-form-check-bg);background-image:var(--bs-form-check-bg-image);background-repeat:no-repeat;background-position:center;background-size:contain;border:1px solid #dee2e6;print-color-adjust:exact}.form-check-input[type=radio],.shiny-input-container .checkbox input[type=radio],.shiny-input-container .checkbox-inline input[type=radio],.shiny-input-container .radio input[type=radio],.shiny-input-container .radio-inline input[type=radio]{border-radius:50%}.form-check-input:active,.shiny-input-container .checkbox input:active,.shiny-input-container .checkbox-inline input:active,.shiny-input-container .radio 
input:active,.shiny-input-container .radio-inline input:active{filter:brightness(90%)}.form-check-input:focus,.shiny-input-container .checkbox input:focus,.shiny-input-container .checkbox-inline input:focus,.shiny-input-container .radio input:focus,.shiny-input-container .radio-inline input:focus{border-color:#93c0f1;outline:0;box-shadow:0 0 0 .25rem rgba(39,128,227,.25)}.form-check-input:checked,.shiny-input-container .checkbox input:checked,.shiny-input-container .checkbox-inline input:checked,.shiny-input-container .radio input:checked,.shiny-input-container .radio-inline input:checked{background-color:#2780e3;border-color:#2780e3}.form-check-input:checked[type=checkbox],.shiny-input-container .checkbox input:checked[type=checkbox],.shiny-input-container .checkbox-inline input:checked[type=checkbox],.shiny-input-container .radio input:checked[type=checkbox],.shiny-input-container .radio-inline input:checked[type=checkbox]{--bs-form-check-bg-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='m6 10 3 3 6-6'/%3e%3c/svg%3e")}.form-check-input:checked[type=radio],.shiny-input-container .checkbox input:checked[type=radio],.shiny-input-container .checkbox-inline input:checked[type=radio],.shiny-input-container .radio input:checked[type=radio],.shiny-input-container .radio-inline input:checked[type=radio]{--bs-form-check-bg-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='2' fill='%23fff'/%3e%3c/svg%3e")}.form-check-input[type=checkbox]:indeterminate,.shiny-input-container .checkbox input[type=checkbox]:indeterminate,.shiny-input-container .checkbox-inline input[type=checkbox]:indeterminate,.shiny-input-container .radio input[type=checkbox]:indeterminate,.shiny-input-container .radio-inline input[type=checkbox]:indeterminate{background-color:#2780e3;border-color:#2780e3;--bs-form-check-bg-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10h8'/%3e%3c/svg%3e")}.form-check-input:disabled,.shiny-input-container .checkbox input:disabled,.shiny-input-container .checkbox-inline input:disabled,.shiny-input-container .radio input:disabled,.shiny-input-container .radio-inline input:disabled{pointer-events:none;filter:none;opacity:.5}.form-check-input[disabled]~.form-check-label,.form-check-input[disabled]~span,.form-check-input:disabled~.form-check-label,.form-check-input:disabled~span,.shiny-input-container .checkbox input[disabled]~.form-check-label,.shiny-input-container .checkbox input[disabled]~span,.shiny-input-container .checkbox input:disabled~.form-check-label,.shiny-input-container .checkbox input:disabled~span,.shiny-input-container .checkbox-inline input[disabled]~.form-check-label,.shiny-input-container .checkbox-inline input[disabled]~span,.shiny-input-container .checkbox-inline input:disabled~.form-check-label,.shiny-input-container .checkbox-inline input:disabled~span,.shiny-input-container .radio input[disabled]~.form-check-label,.shiny-input-container .radio input[disabled]~span,.shiny-input-container .radio input:disabled~.form-check-label,.shiny-input-container .radio input:disabled~span,.shiny-input-container .radio-inline input[disabled]~.form-check-label,.shiny-input-container .radio-inline 
input[disabled]~span,.shiny-input-container .radio-inline input:disabled~.form-check-label,.shiny-input-container .radio-inline input:disabled~span{cursor:default;opacity:.5}.form-check-label,.shiny-input-container .checkbox label,.shiny-input-container .checkbox-inline label,.shiny-input-container .radio label,.shiny-input-container .radio-inline label{cursor:pointer}.form-switch{padding-left:2.5em}.form-switch .form-check-input{--bs-form-switch-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%280, 0, 0, 0.25%29'/%3e%3c/svg%3e");width:2em;margin-left:-2.5em;background-image:var(--bs-form-switch-bg);background-position:left center;transition:background-position .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-switch .form-check-input{transition:none}}.form-switch .form-check-input:focus{--bs-form-switch-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%2393c0f1'/%3e%3c/svg%3e")}.form-switch .form-check-input:checked{background-position:right center;--bs-form-switch-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e")}.form-switch.form-check-reverse{padding-right:2.5em;padding-left:0}.form-switch.form-check-reverse .form-check-input{margin-right:-2.5em;margin-left:0}.form-check-inline{display:inline-block;margin-right:1rem}.btn-check{position:absolute;clip:rect(0, 0, 0, 0);pointer-events:none}.btn-check[disabled]+.btn,.btn-check:disabled+.btn{pointer-events:none;filter:none;opacity:.65}[data-bs-theme=dark] .form-switch .form-check-input:not(:checked):not(:focus){--bs-form-switch-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%28255, 255, 255, 0.25%29'/%3e%3c/svg%3e")}.form-range{width:100%;height:1.5rem;padding:0;appearance:none;-webkit-appearance:none;-moz-appearance:none;-ms-appearance:none;-o-appearance:none;background-color:rgba(0,0,0,0)}.form-range:focus{outline:0}.form-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(39,128,227,.25)}.form-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(39,128,227,.25)}.form-range::-moz-focus-outer{border:0}.form-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-0.25rem;appearance:none;-webkit-appearance:none;-moz-appearance:none;-ms-appearance:none;-o-appearance:none;background-color:#2780e3;border:0;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.form-range::-webkit-slider-thumb{transition:none}}.form-range::-webkit-slider-thumb:active{background-color:#bed9f7}.form-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:rgba(0,0,0,0);cursor:pointer;background-color:#f8f9fa;border-color:rgba(0,0,0,0)}.form-range::-moz-range-thumb{width:1rem;height:1rem;appearance:none;-webkit-appearance:none;-moz-appearance:none;-ms-appearance:none;-o-appearance:none;background-color:#2780e3;border:0;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: 
reduce){.form-range::-moz-range-thumb{transition:none}}.form-range::-moz-range-thumb:active{background-color:#bed9f7}.form-range::-moz-range-track{width:100%;height:.5rem;color:rgba(0,0,0,0);cursor:pointer;background-color:#f8f9fa;border-color:rgba(0,0,0,0)}.form-range:disabled{pointer-events:none}.form-range:disabled::-webkit-slider-thumb{background-color:rgba(52,58,64,.75)}.form-range:disabled::-moz-range-thumb{background-color:rgba(52,58,64,.75)}.form-floating{position:relative}.form-floating>.form-control,.form-floating>.form-control-plaintext,.form-floating>.form-select{height:calc(3.5rem + calc(1px * 2));min-height:calc(3.5rem + calc(1px * 2));line-height:1.25}.form-floating>label{position:absolute;top:0;left:0;z-index:2;height:100%;padding:1rem .75rem;overflow:hidden;text-align:start;text-overflow:ellipsis;white-space:nowrap;pointer-events:none;border:1px solid rgba(0,0,0,0);transform-origin:0 0;transition:opacity .1s ease-in-out,transform .1s ease-in-out}@media(prefers-reduced-motion: reduce){.form-floating>label{transition:none}}.form-floating>.form-control,.form-floating>.form-control-plaintext{padding:1rem .75rem}.form-floating>.form-control::placeholder,.form-floating>.form-control-plaintext::placeholder{color:rgba(0,0,0,0)}.form-floating>.form-control:focus,.form-floating>.form-control:not(:placeholder-shown),.form-floating>.form-control-plaintext:focus,.form-floating>.form-control-plaintext:not(:placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:-webkit-autofill,.form-floating>.form-control-plaintext:-webkit-autofill{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-select{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:focus~label,.form-floating>.form-control:not(:placeholder-shown)~label,.form-floating>.form-control-plaintext~label,.form-floating>.form-select~label{color:rgba(var(--bs-body-color-rgb), 0.65);transform:scale(0.85) translateY(-0.5rem) translateX(0.15rem)}.form-floating>.form-control:focus~label::after,.form-floating>.form-control:not(:placeholder-shown)~label::after,.form-floating>.form-control-plaintext~label::after,.form-floating>.form-select~label::after{position:absolute;inset:1rem .375rem;z-index:-1;height:1.5em;content:"";background-color:#fff}.form-floating>.form-control:-webkit-autofill~label{color:rgba(var(--bs-body-color-rgb), 0.65);transform:scale(0.85) translateY(-0.5rem) translateX(0.15rem)}.form-floating>.form-control-plaintext~label{border-width:1px 0}.form-floating>:disabled~label,.form-floating>.form-control:disabled~label{color:#6c757d}.form-floating>:disabled~label::after,.form-floating>.form-control:disabled~label::after{background-color:#e9ecef}.input-group{position:relative;display:flex;display:-webkit-flex;flex-wrap:wrap;-webkit-flex-wrap:wrap;align-items:stretch;-webkit-align-items:stretch;width:100%}.input-group>.form-control,.input-group>.form-select,.input-group>.form-floating{position:relative;flex:1 1 auto;-webkit-flex:1 1 auto;width:1%;min-width:0}.input-group>.form-control:focus,.input-group>.form-select:focus,.input-group>.form-floating:focus-within{z-index:5}.input-group .btn{position:relative;z-index:2}.input-group .btn:focus{z-index:5}.input-group-text{display:flex;display:-webkit-flex;align-items:center;-webkit-align-items:center;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#343a40;text-align:center;white-space:nowrap;background-color:#f8f9fa;border:1px solid 
#dee2e6}.input-group-lg>.form-control,.input-group-lg>.form-select,.input-group-lg>.input-group-text,.input-group-lg>.btn{padding:.5rem 1rem;font-size:1.25rem}.input-group-sm>.form-control,.input-group-sm>.form-select,.input-group-sm>.input-group-text,.input-group-sm>.btn{padding:.25rem .5rem;font-size:0.875rem}.input-group-lg>.form-select,.input-group-sm>.form-select{padding-right:3rem}.input-group>:not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback){margin-left:calc(1px*-1)}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:0.875em;color:#3fb618}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:0.875rem;color:#fff;background-color:#3fb618}.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip,.is-valid~.valid-feedback,.is-valid~.valid-tooltip{display:block}.was-validated .form-control:valid,.form-control.is-valid{border-color:#3fb618;padding-right:calc(1.5em + 0.75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%233fb618' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(0.375em + 0.1875rem) center;background-size:calc(0.75em + 0.375rem) calc(0.75em + 0.375rem)}.was-validated .form-control:valid:focus,.form-control.is-valid:focus{border-color:#3fb618;box-shadow:0 0 0 .25rem rgba(63,182,24,.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + 0.75rem);background-position:top calc(0.375em + 0.1875rem) right calc(0.375em + 0.1875rem)}.was-validated .form-select:valid,.form-select.is-valid{border-color:#3fb618}.was-validated .form-select:valid:not([multiple]):not([size]),.was-validated .form-select:valid:not([multiple])[size="1"],.form-select.is-valid:not([multiple]):not([size]),.form-select.is-valid:not([multiple])[size="1"]{--bs-form-select-bg-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%233fb618' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");padding-right:4.125rem;background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(0.75em + 0.375rem) calc(0.75em + 0.375rem)}.was-validated .form-select:valid:focus,.form-select.is-valid:focus{border-color:#3fb618;box-shadow:0 0 0 .25rem rgba(63,182,24,.25)}.was-validated .form-control-color:valid,.form-control-color.is-valid{width:calc(3rem + calc(1.5em + 0.75rem))}.was-validated .form-check-input:valid,.form-check-input.is-valid{border-color:#3fb618}.was-validated .form-check-input:valid:checked,.form-check-input.is-valid:checked{background-color:#3fb618}.was-validated .form-check-input:valid:focus,.form-check-input.is-valid:focus{box-shadow:0 0 0 .25rem rgba(63,182,24,.25)}.was-validated .form-check-input:valid~.form-check-label,.form-check-input.is-valid~.form-check-label{color:#3fb618}.form-check-inline .form-check-input~.valid-feedback{margin-left:.5em}.was-validated .input-group>.form-control:not(:focus):valid,.input-group>.form-control:not(:focus).is-valid,.was-validated .input-group>.form-select:not(:focus):valid,.input-group>.form-select:not(:focus).is-valid,.was-validated 
.input-group>.form-floating:not(:focus-within):valid,.input-group>.form-floating:not(:focus-within).is-valid{z-index:3}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:0.875em;color:#ff0039}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:0.875rem;color:#fff;background-color:#ff0039}.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip,.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip{display:block}.was-validated .form-control:invalid,.form-control.is-invalid{border-color:#ff0039;padding-right:calc(1.5em + 0.75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23ff0039'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23ff0039' stroke='none'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(0.375em + 0.1875rem) center;background-size:calc(0.75em + 0.375rem) calc(0.75em + 0.375rem)}.was-validated .form-control:invalid:focus,.form-control.is-invalid:focus{border-color:#ff0039;box-shadow:0 0 0 .25rem rgba(255,0,57,.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + 0.75rem);background-position:top calc(0.375em + 0.1875rem) right calc(0.375em + 0.1875rem)}.was-validated .form-select:invalid,.form-select.is-invalid{border-color:#ff0039}.was-validated .form-select:invalid:not([multiple]):not([size]),.was-validated .form-select:invalid:not([multiple])[size="1"],.form-select.is-invalid:not([multiple]):not([size]),.form-select.is-invalid:not([multiple])[size="1"]{--bs-form-select-bg-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23ff0039'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23ff0039' stroke='none'/%3e%3c/svg%3e");padding-right:4.125rem;background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(0.75em + 0.375rem) calc(0.75em + 0.375rem)}.was-validated .form-select:invalid:focus,.form-select.is-invalid:focus{border-color:#ff0039;box-shadow:0 0 0 .25rem rgba(255,0,57,.25)}.was-validated .form-control-color:invalid,.form-control-color.is-invalid{width:calc(3rem + calc(1.5em + 0.75rem))}.was-validated .form-check-input:invalid,.form-check-input.is-invalid{border-color:#ff0039}.was-validated .form-check-input:invalid:checked,.form-check-input.is-invalid:checked{background-color:#ff0039}.was-validated .form-check-input:invalid:focus,.form-check-input.is-invalid:focus{box-shadow:0 0 0 .25rem rgba(255,0,57,.25)}.was-validated .form-check-input:invalid~.form-check-label,.form-check-input.is-invalid~.form-check-label{color:#ff0039}.form-check-inline .form-check-input~.invalid-feedback{margin-left:.5em}.was-validated .input-group>.form-control:not(:focus):invalid,.input-group>.form-control:not(:focus).is-invalid,.was-validated .input-group>.form-select:not(:focus):invalid,.input-group>.form-select:not(:focus).is-invalid,.was-validated .input-group>.form-floating:not(:focus-within):invalid,.input-group>.form-floating:not(:focus-within).is-invalid{z-index:4}.btn{--bs-btn-padding-x: 0.75rem;--bs-btn-padding-y: 0.375rem;--bs-btn-font-family: ;--bs-btn-font-size:1rem;--bs-btn-font-weight: 
400;--bs-btn-line-height: 1.5;--bs-btn-color: #343a40;--bs-btn-bg: transparent;--bs-btn-border-width: 1px;--bs-btn-border-color: transparent;--bs-btn-border-radius: 0.25rem;--bs-btn-hover-border-color: transparent;--bs-btn-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);--bs-btn-disabled-opacity: 0.65;--bs-btn-focus-box-shadow: 0 0 0 0.25rem rgba(var(--bs-btn-focus-shadow-rgb), .5);display:inline-block;padding:var(--bs-btn-padding-y) var(--bs-btn-padding-x);font-family:var(--bs-btn-font-family);font-size:var(--bs-btn-font-size);font-weight:var(--bs-btn-font-weight);line-height:var(--bs-btn-line-height);color:var(--bs-btn-color);text-align:center;text-decoration:none;-webkit-text-decoration:none;-moz-text-decoration:none;-ms-text-decoration:none;-o-text-decoration:none;vertical-align:middle;cursor:pointer;user-select:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;border:var(--bs-btn-border-width) solid var(--bs-btn-border-color);background-color:var(--bs-btn-bg);transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: reduce){.btn{transition:none}}.btn:hover{color:var(--bs-btn-hover-color);background-color:var(--bs-btn-hover-bg);border-color:var(--bs-btn-hover-border-color)}.btn-check+.btn:hover{color:var(--bs-btn-color);background-color:var(--bs-btn-bg);border-color:var(--bs-btn-border-color)}.btn:focus-visible{color:var(--bs-btn-hover-color);background-color:var(--bs-btn-hover-bg);border-color:var(--bs-btn-hover-border-color);outline:0;box-shadow:var(--bs-btn-focus-box-shadow)}.btn-check:focus-visible+.btn{border-color:var(--bs-btn-hover-border-color);outline:0;box-shadow:var(--bs-btn-focus-box-shadow)}.btn-check:checked+.btn,:not(.btn-check)+.btn:active,.btn:first-child:active,.btn.active,.btn.show{color:var(--bs-btn-active-color);background-color:var(--bs-btn-active-bg);border-color:var(--bs-btn-active-border-color)}.btn-check:checked+.btn:focus-visible,:not(.btn-check)+.btn:active:focus-visible,.btn:first-child:active:focus-visible,.btn.active:focus-visible,.btn.show:focus-visible{box-shadow:var(--bs-btn-focus-box-shadow)}.btn:disabled,.btn.disabled,fieldset:disabled .btn{color:var(--bs-btn-disabled-color);pointer-events:none;background-color:var(--bs-btn-disabled-bg);border-color:var(--bs-btn-disabled-border-color);opacity:var(--bs-btn-disabled-opacity)}.btn-default{--bs-btn-color: #fff;--bs-btn-bg: #343a40;--bs-btn-border-color: #343a40;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #2c3136;--bs-btn-hover-border-color: #2a2e33;--bs-btn-focus-shadow-rgb: 82, 88, 93;--bs-btn-active-color: #fff;--bs-btn-active-bg: #2a2e33;--bs-btn-active-border-color: #272c30;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #343a40;--bs-btn-disabled-border-color: #343a40}.btn-primary{--bs-btn-color: #fff;--bs-btn-bg: #2780e3;--bs-btn-border-color: #2780e3;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #216dc1;--bs-btn-hover-border-color: #1f66b6;--bs-btn-focus-shadow-rgb: 71, 147, 231;--bs-btn-active-color: #fff;--bs-btn-active-bg: #1f66b6;--bs-btn-active-border-color: #1d60aa;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #2780e3;--bs-btn-disabled-border-color: #2780e3}.btn-secondary{--bs-btn-color: #fff;--bs-btn-bg: #343a40;--bs-btn-border-color: #343a40;--bs-btn-hover-color: 
#fff;--bs-btn-hover-bg: #2c3136;--bs-btn-hover-border-color: #2a2e33;--bs-btn-focus-shadow-rgb: 82, 88, 93;--bs-btn-active-color: #fff;--bs-btn-active-bg: #2a2e33;--bs-btn-active-border-color: #272c30;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #343a40;--bs-btn-disabled-border-color: #343a40}.btn-success{--bs-btn-color: #fff;--bs-btn-bg: #3fb618;--bs-btn-border-color: #3fb618;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #369b14;--bs-btn-hover-border-color: #329213;--bs-btn-focus-shadow-rgb: 92, 193, 59;--bs-btn-active-color: #fff;--bs-btn-active-bg: #329213;--bs-btn-active-border-color: #2f8912;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #3fb618;--bs-btn-disabled-border-color: #3fb618}.btn-info{--bs-btn-color: #fff;--bs-btn-bg: #9954bb;--bs-btn-border-color: #9954bb;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #82479f;--bs-btn-hover-border-color: #7a4396;--bs-btn-focus-shadow-rgb: 168, 110, 197;--bs-btn-active-color: #fff;--bs-btn-active-bg: #7a4396;--bs-btn-active-border-color: #733f8c;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #9954bb;--bs-btn-disabled-border-color: #9954bb}.btn-warning{--bs-btn-color: #fff;--bs-btn-bg: #ff7518;--bs-btn-border-color: #ff7518;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #d96314;--bs-btn-hover-border-color: #cc5e13;--bs-btn-focus-shadow-rgb: 255, 138, 59;--bs-btn-active-color: #fff;--bs-btn-active-bg: #cc5e13;--bs-btn-active-border-color: #bf5812;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #ff7518;--bs-btn-disabled-border-color: #ff7518}.btn-danger{--bs-btn-color: #fff;--bs-btn-bg: #ff0039;--bs-btn-border-color: #ff0039;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #d90030;--bs-btn-hover-border-color: #cc002e;--bs-btn-focus-shadow-rgb: 255, 38, 87;--bs-btn-active-color: #fff;--bs-btn-active-bg: #cc002e;--bs-btn-active-border-color: #bf002b;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #ff0039;--bs-btn-disabled-border-color: #ff0039}.btn-light{--bs-btn-color: #000;--bs-btn-bg: #f8f9fa;--bs-btn-border-color: #f8f9fa;--bs-btn-hover-color: #000;--bs-btn-hover-bg: #d3d4d5;--bs-btn-hover-border-color: #c6c7c8;--bs-btn-focus-shadow-rgb: 211, 212, 213;--bs-btn-active-color: #000;--bs-btn-active-bg: #c6c7c8;--bs-btn-active-border-color: #babbbc;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #000;--bs-btn-disabled-bg: #f8f9fa;--bs-btn-disabled-border-color: #f8f9fa}.btn-dark{--bs-btn-color: #fff;--bs-btn-bg: #343a40;--bs-btn-border-color: #343a40;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #52585d;--bs-btn-hover-border-color: #484e53;--bs-btn-focus-shadow-rgb: 82, 88, 93;--bs-btn-active-color: #fff;--bs-btn-active-bg: #5d6166;--bs-btn-active-border-color: #484e53;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #343a40;--bs-btn-disabled-border-color: #343a40}.btn-outline-default{--bs-btn-color: #343a40;--bs-btn-border-color: #343a40;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #343a40;--bs-btn-hover-border-color: #343a40;--bs-btn-focus-shadow-rgb: 52, 58, 64;--bs-btn-active-color: #fff;--bs-btn-active-bg: #343a40;--bs-btn-active-border-color: #343a40;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 
0.125);--bs-btn-disabled-color: #343a40;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #343a40;--bs-btn-bg: transparent;--bs-gradient: none}.btn-outline-primary{--bs-btn-color: #2780e3;--bs-btn-border-color: #2780e3;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #2780e3;--bs-btn-hover-border-color: #2780e3;--bs-btn-focus-shadow-rgb: 39, 128, 227;--bs-btn-active-color: #fff;--bs-btn-active-bg: #2780e3;--bs-btn-active-border-color: #2780e3;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #2780e3;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #2780e3;--bs-btn-bg: transparent;--bs-gradient: none}.btn-outline-secondary{--bs-btn-color: #343a40;--bs-btn-border-color: #343a40;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #343a40;--bs-btn-hover-border-color: #343a40;--bs-btn-focus-shadow-rgb: 52, 58, 64;--bs-btn-active-color: #fff;--bs-btn-active-bg: #343a40;--bs-btn-active-border-color: #343a40;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #343a40;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #343a40;--bs-btn-bg: transparent;--bs-gradient: none}.btn-outline-success{--bs-btn-color: #3fb618;--bs-btn-border-color: #3fb618;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #3fb618;--bs-btn-hover-border-color: #3fb618;--bs-btn-focus-shadow-rgb: 63, 182, 24;--bs-btn-active-color: #fff;--bs-btn-active-bg: #3fb618;--bs-btn-active-border-color: #3fb618;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #3fb618;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #3fb618;--bs-btn-bg: transparent;--bs-gradient: none}.btn-outline-info{--bs-btn-color: #9954bb;--bs-btn-border-color: #9954bb;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #9954bb;--bs-btn-hover-border-color: #9954bb;--bs-btn-focus-shadow-rgb: 153, 84, 187;--bs-btn-active-color: #fff;--bs-btn-active-bg: #9954bb;--bs-btn-active-border-color: #9954bb;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #9954bb;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #9954bb;--bs-btn-bg: transparent;--bs-gradient: none}.btn-outline-warning{--bs-btn-color: #ff7518;--bs-btn-border-color: #ff7518;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #ff7518;--bs-btn-hover-border-color: #ff7518;--bs-btn-focus-shadow-rgb: 255, 117, 24;--bs-btn-active-color: #fff;--bs-btn-active-bg: #ff7518;--bs-btn-active-border-color: #ff7518;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #ff7518;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #ff7518;--bs-btn-bg: transparent;--bs-gradient: none}.btn-outline-danger{--bs-btn-color: #ff0039;--bs-btn-border-color: #ff0039;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #ff0039;--bs-btn-hover-border-color: #ff0039;--bs-btn-focus-shadow-rgb: 255, 0, 57;--bs-btn-active-color: #fff;--bs-btn-active-bg: #ff0039;--bs-btn-active-border-color: #ff0039;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #ff0039;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #ff0039;--bs-btn-bg: transparent;--bs-gradient: none}.btn-outline-light{--bs-btn-color: #f8f9fa;--bs-btn-border-color: #f8f9fa;--bs-btn-hover-color: #000;--bs-btn-hover-bg: #f8f9fa;--bs-btn-hover-border-color: #f8f9fa;--bs-btn-focus-shadow-rgb: 248, 249, 250;--bs-btn-active-color: #000;--bs-btn-active-bg: #f8f9fa;--bs-btn-active-border-color: 
#f8f9fa;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #f8f9fa;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #f8f9fa;--bs-btn-bg: transparent;--bs-gradient: none}.btn-outline-dark{--bs-btn-color: #343a40;--bs-btn-border-color: #343a40;--bs-btn-hover-color: #fff;--bs-btn-hover-bg: #343a40;--bs-btn-hover-border-color: #343a40;--bs-btn-focus-shadow-rgb: 52, 58, 64;--bs-btn-active-color: #fff;--bs-btn-active-bg: #343a40;--bs-btn-active-border-color: #343a40;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #343a40;--bs-btn-disabled-bg: transparent;--bs-btn-disabled-border-color: #343a40;--bs-btn-bg: transparent;--bs-gradient: none}.btn-link{--bs-btn-font-weight: 400;--bs-btn-color: #2761e3;--bs-btn-bg: transparent;--bs-btn-border-color: transparent;--bs-btn-hover-color: #1f4eb6;--bs-btn-hover-border-color: transparent;--bs-btn-active-color: #1f4eb6;--bs-btn-active-border-color: transparent;--bs-btn-disabled-color: #6c757d;--bs-btn-disabled-border-color: transparent;--bs-btn-box-shadow: 0 0 0 #000;--bs-btn-focus-shadow-rgb: 71, 121, 231;text-decoration:underline;-webkit-text-decoration:underline;-moz-text-decoration:underline;-ms-text-decoration:underline;-o-text-decoration:underline}.btn-link:focus-visible{color:var(--bs-btn-color)}.btn-link:hover{color:var(--bs-btn-hover-color)}.btn-lg,.btn-group-lg>.btn{--bs-btn-padding-y: 0.5rem;--bs-btn-padding-x: 1rem;--bs-btn-font-size:1.25rem;--bs-btn-border-radius: 0.5rem}.btn-sm,.btn-group-sm>.btn{--bs-btn-padding-y: 0.25rem;--bs-btn-padding-x: 0.5rem;--bs-btn-font-size:0.875rem;--bs-btn-border-radius: 0.2em}.fade{transition:opacity .15s linear}@media(prefers-reduced-motion: reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{height:0;overflow:hidden;transition:height .2s ease}@media(prefers-reduced-motion: reduce){.collapsing{transition:none}}.collapsing.collapse-horizontal{width:0;height:auto;transition:width .35s ease}@media(prefers-reduced-motion: reduce){.collapsing.collapse-horizontal{transition:none}}.dropup,.dropend,.dropdown,.dropstart,.dropup-center,.dropdown-center{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid rgba(0,0,0,0);border-bottom:0;border-left:.3em solid rgba(0,0,0,0)}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{--bs-dropdown-zindex: 1000;--bs-dropdown-min-width: 10rem;--bs-dropdown-padding-x: 0;--bs-dropdown-padding-y: 0.5rem;--bs-dropdown-spacer: 0.125rem;--bs-dropdown-font-size:1rem;--bs-dropdown-color: #343a40;--bs-dropdown-bg: #fff;--bs-dropdown-border-color: rgba(0, 0, 0, 0.175);--bs-dropdown-border-radius: 0.25rem;--bs-dropdown-border-width: 1px;--bs-dropdown-inner-border-radius: calc(0.25rem - 1px);--bs-dropdown-divider-bg: rgba(0, 0, 0, 0.175);--bs-dropdown-divider-margin-y: 0.5rem;--bs-dropdown-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15);--bs-dropdown-link-color: #343a40;--bs-dropdown-link-hover-color: #343a40;--bs-dropdown-link-hover-bg: #f8f9fa;--bs-dropdown-link-active-color: #fff;--bs-dropdown-link-active-bg: #2780e3;--bs-dropdown-link-disabled-color: rgba(52, 58, 64, 0.5);--bs-dropdown-item-padding-x: 1rem;--bs-dropdown-item-padding-y: 0.25rem;--bs-dropdown-header-color: #6c757d;--bs-dropdown-header-padding-x: 1rem;--bs-dropdown-header-padding-y: 
0.5rem;position:absolute;z-index:var(--bs-dropdown-zindex);display:none;min-width:var(--bs-dropdown-min-width);padding:var(--bs-dropdown-padding-y) var(--bs-dropdown-padding-x);margin:0;font-size:var(--bs-dropdown-font-size);color:var(--bs-dropdown-color);text-align:left;list-style:none;background-color:var(--bs-dropdown-bg);background-clip:padding-box;border:var(--bs-dropdown-border-width) solid var(--bs-dropdown-border-color)}.dropdown-menu[data-bs-popper]{top:100%;left:0;margin-top:var(--bs-dropdown-spacer)}.dropdown-menu-start{--bs-position: start}.dropdown-menu-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-end{--bs-position: end}.dropdown-menu-end[data-bs-popper]{right:0;left:auto}@media(min-width: 576px){.dropdown-menu-sm-start{--bs-position: start}.dropdown-menu-sm-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-sm-end{--bs-position: end}.dropdown-menu-sm-end[data-bs-popper]{right:0;left:auto}}@media(min-width: 768px){.dropdown-menu-md-start{--bs-position: start}.dropdown-menu-md-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-md-end{--bs-position: end}.dropdown-menu-md-end[data-bs-popper]{right:0;left:auto}}@media(min-width: 992px){.dropdown-menu-lg-start{--bs-position: start}.dropdown-menu-lg-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-lg-end{--bs-position: end}.dropdown-menu-lg-end[data-bs-popper]{right:0;left:auto}}@media(min-width: 1200px){.dropdown-menu-xl-start{--bs-position: start}.dropdown-menu-xl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xl-end{--bs-position: end}.dropdown-menu-xl-end[data-bs-popper]{right:0;left:auto}}@media(min-width: 1400px){.dropdown-menu-xxl-start{--bs-position: start}.dropdown-menu-xxl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xxl-end{--bs-position: end}.dropdown-menu-xxl-end[data-bs-popper]{right:0;left:auto}}.dropup .dropdown-menu[data-bs-popper]{top:auto;bottom:100%;margin-top:0;margin-bottom:var(--bs-dropdown-spacer)}.dropup .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid rgba(0,0,0,0);border-bottom:.3em solid;border-left:.3em solid rgba(0,0,0,0)}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-menu[data-bs-popper]{top:0;right:auto;left:100%;margin-top:0;margin-left:var(--bs-dropdown-spacer)}.dropend .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid rgba(0,0,0,0);border-right:0;border-bottom:.3em solid rgba(0,0,0,0);border-left:.3em solid}.dropend .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-toggle::after{vertical-align:0}.dropstart .dropdown-menu[data-bs-popper]{top:0;right:100%;left:auto;margin-top:0;margin-right:var(--bs-dropdown-spacer)}.dropstart .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:""}.dropstart .dropdown-toggle::after{display:none}.dropstart .dropdown-toggle::before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid rgba(0,0,0,0);border-right:.3em solid;border-bottom:.3em solid rgba(0,0,0,0)}.dropstart .dropdown-toggle:empty::after{margin-left:0}.dropstart .dropdown-toggle::before{vertical-align:0}.dropdown-divider{height:0;margin:var(--bs-dropdown-divider-margin-y) 0;overflow:hidden;border-top:1px solid var(--bs-dropdown-divider-bg);opacity:1}.dropdown-item{display:block;width:100%;padding:var(--bs-dropdown-item-padding-y) 
var(--bs-dropdown-item-padding-x);clear:both;font-weight:400;color:var(--bs-dropdown-link-color);text-align:inherit;text-decoration:none;-webkit-text-decoration:none;-moz-text-decoration:none;-ms-text-decoration:none;-o-text-decoration:none;white-space:nowrap;background-color:rgba(0,0,0,0);border:0}.dropdown-item:hover,.dropdown-item:focus{color:var(--bs-dropdown-link-hover-color);background-color:var(--bs-dropdown-link-hover-bg)}.dropdown-item.active,.dropdown-item:active{color:var(--bs-dropdown-link-active-color);text-decoration:none;background-color:var(--bs-dropdown-link-active-bg)}.dropdown-item.disabled,.dropdown-item:disabled{color:var(--bs-dropdown-link-disabled-color);pointer-events:none;background-color:rgba(0,0,0,0)}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:var(--bs-dropdown-header-padding-y) var(--bs-dropdown-header-padding-x);margin-bottom:0;font-size:0.875rem;color:var(--bs-dropdown-header-color);white-space:nowrap}.dropdown-item-text{display:block;padding:var(--bs-dropdown-item-padding-y) var(--bs-dropdown-item-padding-x);color:var(--bs-dropdown-link-color)}.dropdown-menu-dark{--bs-dropdown-color: #dee2e6;--bs-dropdown-bg: #343a40;--bs-dropdown-border-color: rgba(0, 0, 0, 0.175);--bs-dropdown-box-shadow: ;--bs-dropdown-link-color: #dee2e6;--bs-dropdown-link-hover-color: #fff;--bs-dropdown-divider-bg: rgba(0, 0, 0, 0.175);--bs-dropdown-link-hover-bg: rgba(255, 255, 255, 0.15);--bs-dropdown-link-active-color: #fff;--bs-dropdown-link-active-bg: #2780e3;--bs-dropdown-link-disabled-color: #adb5bd;--bs-dropdown-header-color: #adb5bd}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;flex:1 1 auto;-webkit-flex:1 1 auto}.btn-group>.btn-check:checked+.btn,.btn-group>.btn-check:focus+.btn,.btn-group>.btn:hover,.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn-check:checked+.btn,.btn-group-vertical>.btn-check:focus+.btn,.btn-group-vertical>.btn:hover,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn.active{z-index:1}.btn-toolbar{display:flex;display:-webkit-flex;flex-wrap:wrap;-webkit-flex-wrap:wrap;justify-content:flex-start;-webkit-justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>:not(.btn-check:first-child)+.btn,.btn-group>.btn-group:not(:first-child){margin-left:calc(1px*-1)}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after,.dropend .dropdown-toggle-split::after{margin-left:0}.dropstart .dropdown-toggle-split::before{margin-right:0}.btn-sm+.dropdown-toggle-split,.btn-group-sm>.btn+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-lg+.dropdown-toggle-split,.btn-group-lg>.btn+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;-webkit-flex-direction:column;align-items:flex-start;-webkit-align-items:flex-start;justify-content:center;-webkit-justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn:not(:first-child),.btn-group-vertical>.btn-group:not(:first-child){margin-top:calc(1px*-1)}.nav{--bs-nav-link-padding-x: 1rem;--bs-nav-link-padding-y: 0.5rem;--bs-nav-link-font-weight: ;--bs-nav-link-color: #2761e3;--bs-nav-link-hover-color: #1f4eb6;--bs-nav-link-disabled-color: rgba(52, 58, 64, 
0.75);display:flex;display:-webkit-flex;flex-wrap:wrap;-webkit-flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:var(--bs-nav-link-padding-y) var(--bs-nav-link-padding-x);font-size:var(--bs-nav-link-font-size);font-weight:var(--bs-nav-link-font-weight);color:var(--bs-nav-link-color);text-decoration:none;-webkit-text-decoration:none;-moz-text-decoration:none;-ms-text-decoration:none;-o-text-decoration:none;background:none;border:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out}@media(prefers-reduced-motion: reduce){.nav-link{transition:none}}.nav-link:hover,.nav-link:focus{color:var(--bs-nav-link-hover-color)}.nav-link:focus-visible{outline:0;box-shadow:0 0 0 .25rem rgba(39,128,227,.25)}.nav-link.disabled,.nav-link:disabled{color:var(--bs-nav-link-disabled-color);pointer-events:none;cursor:default}.nav-tabs{--bs-nav-tabs-border-width: 1px;--bs-nav-tabs-border-color: #dee2e6;--bs-nav-tabs-border-radius: 0.25rem;--bs-nav-tabs-link-hover-border-color: #e9ecef #e9ecef #dee2e6;--bs-nav-tabs-link-active-color: #000;--bs-nav-tabs-link-active-bg: #fff;--bs-nav-tabs-link-active-border-color: #dee2e6 #dee2e6 #fff;border-bottom:var(--bs-nav-tabs-border-width) solid var(--bs-nav-tabs-border-color)}.nav-tabs .nav-link{margin-bottom:calc(-1*var(--bs-nav-tabs-border-width));border:var(--bs-nav-tabs-border-width) solid rgba(0,0,0,0)}.nav-tabs .nav-link:hover,.nav-tabs .nav-link:focus{isolation:isolate;border-color:var(--bs-nav-tabs-link-hover-border-color)}.nav-tabs .nav-link.active,.nav-tabs .nav-item.show .nav-link{color:var(--bs-nav-tabs-link-active-color);background-color:var(--bs-nav-tabs-link-active-bg);border-color:var(--bs-nav-tabs-link-active-border-color)}.nav-tabs .dropdown-menu{margin-top:calc(-1*var(--bs-nav-tabs-border-width))}.nav-pills{--bs-nav-pills-border-radius: 0.25rem;--bs-nav-pills-link-active-color: #fff;--bs-nav-pills-link-active-bg: #2780e3}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:var(--bs-nav-pills-link-active-color);background-color:var(--bs-nav-pills-link-active-bg)}.nav-underline{--bs-nav-underline-gap: 1rem;--bs-nav-underline-border-width: 0.125rem;--bs-nav-underline-link-active-color: #000;gap:var(--bs-nav-underline-gap)}.nav-underline .nav-link{padding-right:0;padding-left:0;border-bottom:var(--bs-nav-underline-border-width) solid rgba(0,0,0,0)}.nav-underline .nav-link:hover,.nav-underline .nav-link:focus{border-bottom-color:currentcolor}.nav-underline .nav-link.active,.nav-underline .show>.nav-link{font-weight:700;color:var(--bs-nav-underline-link-active-color);border-bottom-color:currentcolor}.nav-fill>.nav-link,.nav-fill .nav-item{flex:1 1 auto;-webkit-flex:1 1 auto;text-align:center}.nav-justified>.nav-link,.nav-justified .nav-item{flex-basis:0;-webkit-flex-basis:0;flex-grow:1;-webkit-flex-grow:1;text-align:center}.nav-fill .nav-item .nav-link,.nav-justified .nav-item .nav-link{width:100%}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{--bs-navbar-padding-x: 0;--bs-navbar-padding-y: 0.5rem;--bs-navbar-color: #fdfeff;--bs-navbar-hover-color: rgba(253, 253, 255, 0.8);--bs-navbar-disabled-color: rgba(253, 254, 255, 0.75);--bs-navbar-active-color: #fdfdff;--bs-navbar-brand-padding-y: 0.3125rem;--bs-navbar-brand-margin-end: 1rem;--bs-navbar-brand-font-size: 1.25rem;--bs-navbar-brand-color: #fdfeff;--bs-navbar-brand-hover-color: #fdfdff;--bs-navbar-nav-link-padding-x: 0.5rem;--bs-navbar-toggler-padding-y: 
0.25;--bs-navbar-toggler-padding-x: 0;--bs-navbar-toggler-font-size: 1.25rem;--bs-navbar-toggler-icon-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='%23fdfeff' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e");--bs-navbar-toggler-border-color: rgba(253, 254, 255, 0);--bs-navbar-toggler-border-radius: 0.25rem;--bs-navbar-toggler-focus-width: 0.25rem;--bs-navbar-toggler-transition: box-shadow 0.15s ease-in-out;position:relative;display:flex;display:-webkit-flex;flex-wrap:wrap;-webkit-flex-wrap:wrap;align-items:center;-webkit-align-items:center;justify-content:space-between;-webkit-justify-content:space-between;padding:var(--bs-navbar-padding-y) var(--bs-navbar-padding-x)}.navbar>.container,.navbar>.container-fluid,.navbar>.container-sm,.navbar>.container-md,.navbar>.container-lg,.navbar>.container-xl,.navbar>.container-xxl{display:flex;display:-webkit-flex;flex-wrap:inherit;-webkit-flex-wrap:inherit;align-items:center;-webkit-align-items:center;justify-content:space-between;-webkit-justify-content:space-between}.navbar-brand{padding-top:var(--bs-navbar-brand-padding-y);padding-bottom:var(--bs-navbar-brand-padding-y);margin-right:var(--bs-navbar-brand-margin-end);font-size:var(--bs-navbar-brand-font-size);color:var(--bs-navbar-brand-color);text-decoration:none;-webkit-text-decoration:none;-moz-text-decoration:none;-ms-text-decoration:none;-o-text-decoration:none;white-space:nowrap}.navbar-brand:hover,.navbar-brand:focus{color:var(--bs-navbar-brand-hover-color)}.navbar-nav{--bs-nav-link-padding-x: 0;--bs-nav-link-padding-y: 0.5rem;--bs-nav-link-font-weight: ;--bs-nav-link-color: var(--bs-navbar-color);--bs-nav-link-hover-color: var(--bs-navbar-hover-color);--bs-nav-link-disabled-color: var(--bs-navbar-disabled-color);display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link.active,.navbar-nav .nav-link.show{color:var(--bs-navbar-active-color)}.navbar-nav .dropdown-menu{position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem;color:var(--bs-navbar-color)}.navbar-text a,.navbar-text a:hover,.navbar-text a:focus{color:var(--bs-navbar-active-color)}.navbar-collapse{flex-basis:100%;-webkit-flex-basis:100%;flex-grow:1;-webkit-flex-grow:1;align-items:center;-webkit-align-items:center}.navbar-toggler{padding:var(--bs-navbar-toggler-padding-y) var(--bs-navbar-toggler-padding-x);font-size:var(--bs-navbar-toggler-font-size);line-height:1;color:var(--bs-navbar-color);background-color:rgba(0,0,0,0);border:var(--bs-border-width) solid var(--bs-navbar-toggler-border-color);transition:var(--bs-navbar-toggler-transition)}@media(prefers-reduced-motion: reduce){.navbar-toggler{transition:none}}.navbar-toggler:hover{text-decoration:none}.navbar-toggler:focus{text-decoration:none;outline:0;box-shadow:0 0 0 var(--bs-navbar-toggler-focus-width)}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;background-image:var(--bs-navbar-toggler-icon-bg);background-repeat:no-repeat;background-position:center;background-size:100%}.navbar-nav-scroll{max-height:var(--bs-scroll-height, 75vh);overflow-y:auto}@media(min-width: 576px){.navbar-expand-sm{flex-wrap:nowrap;-webkit-flex-wrap:nowrap;justify-content:flex-start;-webkit-justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row;-webkit-flex-direction:row}.navbar-expand-sm .navbar-nav 
.dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-sm .navbar-nav-scroll{overflow:visible}.navbar-expand-sm .navbar-collapse{display:flex !important;display:-webkit-flex !important;flex-basis:auto;-webkit-flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}.navbar-expand-sm .offcanvas{position:static;z-index:auto;flex-grow:1;-webkit-flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-sm .offcanvas .offcanvas-header{display:none}.navbar-expand-sm .offcanvas .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible}}@media(min-width: 768px){.navbar-expand-md{flex-wrap:nowrap;-webkit-flex-wrap:nowrap;justify-content:flex-start;-webkit-justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row;-webkit-flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-md .navbar-nav-scroll{overflow:visible}.navbar-expand-md .navbar-collapse{display:flex !important;display:-webkit-flex !important;flex-basis:auto;-webkit-flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}.navbar-expand-md .offcanvas{position:static;z-index:auto;flex-grow:1;-webkit-flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-md .offcanvas .offcanvas-header{display:none}.navbar-expand-md .offcanvas .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible}}@media(min-width: 992px){.navbar-expand-lg{flex-wrap:nowrap;-webkit-flex-wrap:nowrap;justify-content:flex-start;-webkit-justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row;-webkit-flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-lg .navbar-nav-scroll{overflow:visible}.navbar-expand-lg .navbar-collapse{display:flex !important;display:-webkit-flex !important;flex-basis:auto;-webkit-flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}.navbar-expand-lg .offcanvas{position:static;z-index:auto;flex-grow:1;-webkit-flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-lg .offcanvas .offcanvas-header{display:none}.navbar-expand-lg .offcanvas .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible}}@media(min-width: 1200px){.navbar-expand-xl{flex-wrap:nowrap;-webkit-flex-wrap:nowrap;justify-content:flex-start;-webkit-justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row;-webkit-flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav 
.nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-xl .navbar-nav-scroll{overflow:visible}.navbar-expand-xl .navbar-collapse{display:flex !important;display:-webkit-flex !important;flex-basis:auto;-webkit-flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}.navbar-expand-xl .offcanvas{position:static;z-index:auto;flex-grow:1;-webkit-flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-xl .offcanvas .offcanvas-header{display:none}.navbar-expand-xl .offcanvas .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible}}@media(min-width: 1400px){.navbar-expand-xxl{flex-wrap:nowrap;-webkit-flex-wrap:nowrap;justify-content:flex-start;-webkit-justify-content:flex-start}.navbar-expand-xxl .navbar-nav{flex-direction:row;-webkit-flex-direction:row}.navbar-expand-xxl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xxl .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-xxl .navbar-nav-scroll{overflow:visible}.navbar-expand-xxl .navbar-collapse{display:flex !important;display:-webkit-flex !important;flex-basis:auto;-webkit-flex-basis:auto}.navbar-expand-xxl .navbar-toggler{display:none}.navbar-expand-xxl .offcanvas{position:static;z-index:auto;flex-grow:1;-webkit-flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand-xxl .offcanvas .offcanvas-header{display:none}.navbar-expand-xxl .offcanvas .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible}}.navbar-expand{flex-wrap:nowrap;-webkit-flex-wrap:nowrap;justify-content:flex-start;-webkit-justify-content:flex-start}.navbar-expand .navbar-nav{flex-direction:row;-webkit-flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand .navbar-nav-scroll{overflow:visible}.navbar-expand .navbar-collapse{display:flex !important;display:-webkit-flex !important;flex-basis:auto;-webkit-flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-expand .offcanvas{position:static;z-index:auto;flex-grow:1;-webkit-flex-grow:1;width:auto !important;height:auto !important;visibility:visible !important;background-color:rgba(0,0,0,0) !important;border:0 !important;transform:none !important;transition:none}.navbar-expand .offcanvas .offcanvas-header{display:none}.navbar-expand .offcanvas .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible}.navbar-dark,.navbar[data-bs-theme=dark]{--bs-navbar-color: #fdfeff;--bs-navbar-hover-color: rgba(253, 253, 255, 0.8);--bs-navbar-disabled-color: rgba(253, 254, 255, 0.75);--bs-navbar-active-color: #fdfdff;--bs-navbar-brand-color: #fdfeff;--bs-navbar-brand-hover-color: #fdfdff;--bs-navbar-toggler-border-color: rgba(253, 254, 255, 0);--bs-navbar-toggler-icon-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='%23fdfeff' stroke-linecap='round' stroke-miterlimit='10' 
stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}[data-bs-theme=dark] .navbar-toggler-icon{--bs-navbar-toggler-icon-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='%23fdfeff' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.card{--bs-card-spacer-y: 1rem;--bs-card-spacer-x: 1rem;--bs-card-title-spacer-y: 0.5rem;--bs-card-title-color: ;--bs-card-subtitle-color: ;--bs-card-border-width: 1px;--bs-card-border-color: rgba(0, 0, 0, 0.175);--bs-card-border-radius: 0.25rem;--bs-card-box-shadow: ;--bs-card-inner-border-radius: calc(0.25rem - 1px);--bs-card-cap-padding-y: 0.5rem;--bs-card-cap-padding-x: 1rem;--bs-card-cap-bg: rgba(52, 58, 64, 0.25);--bs-card-cap-color: ;--bs-card-height: ;--bs-card-color: ;--bs-card-bg: #fff;--bs-card-img-overlay-padding: 1rem;--bs-card-group-margin: 0.75rem;position:relative;display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;min-width:0;height:var(--bs-card-height);color:var(--bs-body-color);word-wrap:break-word;background-color:var(--bs-card-bg);background-clip:border-box;border:var(--bs-card-border-width) solid var(--bs-card-border-color)}.card>hr{margin-right:0;margin-left:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0}.card>.list-group:last-child{border-bottom-width:0}.card>.card-header+.list-group,.card>.list-group+.card-footer{border-top:0}.card-body{flex:1 1 auto;-webkit-flex:1 1 auto;padding:var(--bs-card-spacer-y) var(--bs-card-spacer-x);color:var(--bs-card-color)}.card-title{margin-bottom:var(--bs-card-title-spacer-y);color:var(--bs-card-title-color)}.card-subtitle{margin-top:calc(-0.5*var(--bs-card-title-spacer-y));margin-bottom:0;color:var(--bs-card-subtitle-color)}.card-text:last-child{margin-bottom:0}.card-link+.card-link{margin-left:var(--bs-card-spacer-x)}.card-header{padding:var(--bs-card-cap-padding-y) var(--bs-card-cap-padding-x);margin-bottom:0;color:var(--bs-card-cap-color);background-color:var(--bs-card-cap-bg);border-bottom:var(--bs-card-border-width) solid var(--bs-card-border-color)}.card-footer{padding:var(--bs-card-cap-padding-y) var(--bs-card-cap-padding-x);color:var(--bs-card-cap-color);background-color:var(--bs-card-cap-bg);border-top:var(--bs-card-border-width) solid var(--bs-card-border-color)}.card-header-tabs{margin-right:calc(-0.5*var(--bs-card-cap-padding-x));margin-bottom:calc(-1*var(--bs-card-cap-padding-y));margin-left:calc(-0.5*var(--bs-card-cap-padding-x));border-bottom:0}.card-header-tabs .nav-link.active{background-color:var(--bs-card-bg);border-bottom-color:var(--bs-card-bg)}.card-header-pills{margin-right:calc(-0.5*var(--bs-card-cap-padding-x));margin-left:calc(-0.5*var(--bs-card-cap-padding-x))}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:var(--bs-card-img-overlay-padding)}.card-img,.card-img-top,.card-img-bottom{width:100%}.card-group>.card{margin-bottom:var(--bs-card-group-margin)}@media(min-width: 576px){.card-group{display:flex;display:-webkit-flex;flex-flow:row wrap;-webkit-flex-flow:row wrap}.card-group>.card{flex:1 0 0%;-webkit-flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}}.accordion{--bs-accordion-color: #343a40;--bs-accordion-bg: #fff;--bs-accordion-transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, border-radius 0.15s 
ease;--bs-accordion-border-color: #dee2e6;--bs-accordion-border-width: 1px;--bs-accordion-border-radius: 0.25rem;--bs-accordion-inner-border-radius: calc(0.25rem - 1px);--bs-accordion-btn-padding-x: 1.25rem;--bs-accordion-btn-padding-y: 1rem;--bs-accordion-btn-color: #343a40;--bs-accordion-btn-bg: #fff;--bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23343a40'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");--bs-accordion-btn-icon-width: 1.25rem;--bs-accordion-btn-icon-transform: rotate(-180deg);--bs-accordion-btn-icon-transition: transform 0.2s ease-in-out;--bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%2310335b'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");--bs-accordion-btn-focus-border-color: #93c0f1;--bs-accordion-btn-focus-box-shadow: 0 0 0 0.25rem rgba(39, 128, 227, 0.25);--bs-accordion-body-padding-x: 1.25rem;--bs-accordion-body-padding-y: 1rem;--bs-accordion-active-color: #10335b;--bs-accordion-active-bg: #d4e6f9}.accordion-button{position:relative;display:flex;display:-webkit-flex;align-items:center;-webkit-align-items:center;width:100%;padding:var(--bs-accordion-btn-padding-y) var(--bs-accordion-btn-padding-x);font-size:1rem;color:var(--bs-accordion-btn-color);text-align:left;background-color:var(--bs-accordion-btn-bg);border:0;overflow-anchor:none;transition:var(--bs-accordion-transition)}@media(prefers-reduced-motion: reduce){.accordion-button{transition:none}}.accordion-button:not(.collapsed){color:var(--bs-accordion-active-color);background-color:var(--bs-accordion-active-bg);box-shadow:inset 0 calc(-1*var(--bs-accordion-border-width)) 0 var(--bs-accordion-border-color)}.accordion-button:not(.collapsed)::after{background-image:var(--bs-accordion-btn-active-icon);transform:var(--bs-accordion-btn-icon-transform)}.accordion-button::after{flex-shrink:0;-webkit-flex-shrink:0;width:var(--bs-accordion-btn-icon-width);height:var(--bs-accordion-btn-icon-width);margin-left:auto;content:"";background-image:var(--bs-accordion-btn-icon);background-repeat:no-repeat;background-size:var(--bs-accordion-btn-icon-width);transition:var(--bs-accordion-btn-icon-transition)}@media(prefers-reduced-motion: reduce){.accordion-button::after{transition:none}}.accordion-button:hover{z-index:2}.accordion-button:focus{z-index:3;border-color:var(--bs-accordion-btn-focus-border-color);outline:0;box-shadow:var(--bs-accordion-btn-focus-box-shadow)}.accordion-header{margin-bottom:0}.accordion-item{color:var(--bs-accordion-color);background-color:var(--bs-accordion-bg);border:var(--bs-accordion-border-width) solid var(--bs-accordion-border-color)}.accordion-item:not(:first-of-type){border-top:0}.accordion-body{padding:var(--bs-accordion-body-padding-y) var(--bs-accordion-body-padding-x)}.accordion-flush .accordion-collapse{border-width:0}.accordion-flush .accordion-item{border-right:0;border-left:0}.accordion-flush .accordion-item:first-child{border-top:0}.accordion-flush .accordion-item:last-child{border-bottom:0}[data-bs-theme=dark] .accordion-button::after{--bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%237db3ee'%3e%3cpath fill-rule='evenodd' d='M1.646 
4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");--bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%237db3ee'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")}.breadcrumb{--bs-breadcrumb-padding-x: 0;--bs-breadcrumb-padding-y: 0;--bs-breadcrumb-margin-bottom: 1rem;--bs-breadcrumb-bg: ;--bs-breadcrumb-border-radius: ;--bs-breadcrumb-divider-color: rgba(52, 58, 64, 0.75);--bs-breadcrumb-item-padding-x: 0.5rem;--bs-breadcrumb-item-active-color: rgba(52, 58, 64, 0.75);display:flex;display:-webkit-flex;flex-wrap:wrap;-webkit-flex-wrap:wrap;padding:var(--bs-breadcrumb-padding-y) var(--bs-breadcrumb-padding-x);margin-bottom:var(--bs-breadcrumb-margin-bottom);font-size:var(--bs-breadcrumb-font-size);list-style:none;background-color:var(--bs-breadcrumb-bg)}.breadcrumb-item+.breadcrumb-item{padding-left:var(--bs-breadcrumb-item-padding-x)}.breadcrumb-item+.breadcrumb-item::before{float:left;padding-right:var(--bs-breadcrumb-item-padding-x);color:var(--bs-breadcrumb-divider-color);content:var(--bs-breadcrumb-divider, ">") /* rtl: var(--bs-breadcrumb-divider, ">") */}.breadcrumb-item.active{color:var(--bs-breadcrumb-item-active-color)}.pagination{--bs-pagination-padding-x: 0.75rem;--bs-pagination-padding-y: 0.375rem;--bs-pagination-font-size:1rem;--bs-pagination-color: #2761e3;--bs-pagination-bg: #fff;--bs-pagination-border-width: 1px;--bs-pagination-border-color: #dee2e6;--bs-pagination-border-radius: 0.25rem;--bs-pagination-hover-color: #1f4eb6;--bs-pagination-hover-bg: #f8f9fa;--bs-pagination-hover-border-color: #dee2e6;--bs-pagination-focus-color: #1f4eb6;--bs-pagination-focus-bg: #e9ecef;--bs-pagination-focus-box-shadow: 0 0 0 0.25rem rgba(39, 128, 227, 0.25);--bs-pagination-active-color: #fff;--bs-pagination-active-bg: #2780e3;--bs-pagination-active-border-color: #2780e3;--bs-pagination-disabled-color: rgba(52, 58, 64, 0.75);--bs-pagination-disabled-bg: #e9ecef;--bs-pagination-disabled-border-color: #dee2e6;display:flex;display:-webkit-flex;padding-left:0;list-style:none}.page-link{position:relative;display:block;padding:var(--bs-pagination-padding-y) var(--bs-pagination-padding-x);font-size:var(--bs-pagination-font-size);color:var(--bs-pagination-color);text-decoration:none;-webkit-text-decoration:none;-moz-text-decoration:none;-ms-text-decoration:none;-o-text-decoration:none;background-color:var(--bs-pagination-bg);border:var(--bs-pagination-border-width) solid var(--bs-pagination-border-color);transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion: 
reduce){.page-link{transition:none}}.page-link:hover{z-index:2;color:var(--bs-pagination-hover-color);background-color:var(--bs-pagination-hover-bg);border-color:var(--bs-pagination-hover-border-color)}.page-link:focus{z-index:3;color:var(--bs-pagination-focus-color);background-color:var(--bs-pagination-focus-bg);outline:0;box-shadow:var(--bs-pagination-focus-box-shadow)}.page-link.active,.active>.page-link{z-index:3;color:var(--bs-pagination-active-color);background-color:var(--bs-pagination-active-bg);border-color:var(--bs-pagination-active-border-color)}.page-link.disabled,.disabled>.page-link{color:var(--bs-pagination-disabled-color);pointer-events:none;background-color:var(--bs-pagination-disabled-bg);border-color:var(--bs-pagination-disabled-border-color)}.page-item:not(:first-child) .page-link{margin-left:calc(1px*-1)}.pagination-lg{--bs-pagination-padding-x: 1.5rem;--bs-pagination-padding-y: 0.75rem;--bs-pagination-font-size:1.25rem;--bs-pagination-border-radius: 0.5rem}.pagination-sm{--bs-pagination-padding-x: 0.5rem;--bs-pagination-padding-y: 0.25rem;--bs-pagination-font-size:0.875rem;--bs-pagination-border-radius: 0.2em}.badge{--bs-badge-padding-x: 0.65em;--bs-badge-padding-y: 0.35em;--bs-badge-font-size:0.75em;--bs-badge-font-weight: 700;--bs-badge-color: #fff;--bs-badge-border-radius: 0.25rem;display:inline-block;padding:var(--bs-badge-padding-y) var(--bs-badge-padding-x);font-size:var(--bs-badge-font-size);font-weight:var(--bs-badge-font-weight);line-height:1;color:var(--bs-badge-color);text-align:center;white-space:nowrap;vertical-align:baseline}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.alert{--bs-alert-bg: transparent;--bs-alert-padding-x: 1rem;--bs-alert-padding-y: 1rem;--bs-alert-margin-bottom: 1rem;--bs-alert-color: inherit;--bs-alert-border-color: transparent;--bs-alert-border: 0 solid var(--bs-alert-border-color);--bs-alert-border-radius: 0.25rem;--bs-alert-link-color: inherit;position:relative;padding:var(--bs-alert-padding-y) var(--bs-alert-padding-x);margin-bottom:var(--bs-alert-margin-bottom);color:var(--bs-alert-color);background-color:var(--bs-alert-bg);border:var(--bs-alert-border)}.alert-heading{color:inherit}.alert-link{font-weight:700;color:var(--bs-alert-link-color)}.alert-dismissible{padding-right:3rem}.alert-dismissible .btn-close{position:absolute;top:0;right:0;z-index:2;padding:1.25rem 1rem}.alert-default{--bs-alert-color: var(--bs-default-text-emphasis);--bs-alert-bg: var(--bs-default-bg-subtle);--bs-alert-border-color: var(--bs-default-border-subtle);--bs-alert-link-color: var(--bs-default-text-emphasis)}.alert-primary{--bs-alert-color: var(--bs-primary-text-emphasis);--bs-alert-bg: var(--bs-primary-bg-subtle);--bs-alert-border-color: var(--bs-primary-border-subtle);--bs-alert-link-color: var(--bs-primary-text-emphasis)}.alert-secondary{--bs-alert-color: var(--bs-secondary-text-emphasis);--bs-alert-bg: var(--bs-secondary-bg-subtle);--bs-alert-border-color: var(--bs-secondary-border-subtle);--bs-alert-link-color: var(--bs-secondary-text-emphasis)}.alert-success{--bs-alert-color: var(--bs-success-text-emphasis);--bs-alert-bg: var(--bs-success-bg-subtle);--bs-alert-border-color: var(--bs-success-border-subtle);--bs-alert-link-color: var(--bs-success-text-emphasis)}.alert-info{--bs-alert-color: var(--bs-info-text-emphasis);--bs-alert-bg: var(--bs-info-bg-subtle);--bs-alert-border-color: var(--bs-info-border-subtle);--bs-alert-link-color: var(--bs-info-text-emphasis)}.alert-warning{--bs-alert-color: 
var(--bs-warning-text-emphasis);--bs-alert-bg: var(--bs-warning-bg-subtle);--bs-alert-border-color: var(--bs-warning-border-subtle);--bs-alert-link-color: var(--bs-warning-text-emphasis)}.alert-danger{--bs-alert-color: var(--bs-danger-text-emphasis);--bs-alert-bg: var(--bs-danger-bg-subtle);--bs-alert-border-color: var(--bs-danger-border-subtle);--bs-alert-link-color: var(--bs-danger-text-emphasis)}.alert-light{--bs-alert-color: var(--bs-light-text-emphasis);--bs-alert-bg: var(--bs-light-bg-subtle);--bs-alert-border-color: var(--bs-light-border-subtle);--bs-alert-link-color: var(--bs-light-text-emphasis)}.alert-dark{--bs-alert-color: var(--bs-dark-text-emphasis);--bs-alert-bg: var(--bs-dark-bg-subtle);--bs-alert-border-color: var(--bs-dark-border-subtle);--bs-alert-link-color: var(--bs-dark-text-emphasis)}@keyframes progress-bar-stripes{0%{background-position-x:.5rem}}.progress,.progress-stacked{--bs-progress-height: 0.5rem;--bs-progress-font-size:0.75rem;--bs-progress-bg: #e9ecef;--bs-progress-border-radius: 0.25rem;--bs-progress-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.075);--bs-progress-bar-color: #fff;--bs-progress-bar-bg: #2780e3;--bs-progress-bar-transition: width 0.6s ease;display:flex;display:-webkit-flex;height:var(--bs-progress-height);overflow:hidden;font-size:var(--bs-progress-font-size);background-color:var(--bs-progress-bg)}.progress-bar{display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;justify-content:center;-webkit-justify-content:center;overflow:hidden;color:var(--bs-progress-bar-color);text-align:center;white-space:nowrap;background-color:var(--bs-progress-bar-bg);transition:var(--bs-progress-bar-transition)}@media(prefers-reduced-motion: reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);background-size:var(--bs-progress-height) var(--bs-progress-height)}.progress-stacked>.progress{overflow:visible}.progress-stacked>.progress>.progress-bar{width:100%}.progress-bar-animated{animation:1s linear infinite progress-bar-stripes}@media(prefers-reduced-motion: reduce){.progress-bar-animated{animation:none}}.list-group{--bs-list-group-color: #343a40;--bs-list-group-bg: #fff;--bs-list-group-border-color: #dee2e6;--bs-list-group-border-width: 1px;--bs-list-group-border-radius: 0.25rem;--bs-list-group-item-padding-x: 1rem;--bs-list-group-item-padding-y: 0.5rem;--bs-list-group-action-color: rgba(52, 58, 64, 0.75);--bs-list-group-action-hover-color: #000;--bs-list-group-action-hover-bg: #f8f9fa;--bs-list-group-action-active-color: #343a40;--bs-list-group-action-active-bg: #e9ecef;--bs-list-group-disabled-color: rgba(52, 58, 64, 0.75);--bs-list-group-disabled-bg: #fff;--bs-list-group-active-color: #fff;--bs-list-group-active-bg: #2780e3;--bs-list-group-active-border-color: #2780e3;display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;padding-left:0;margin-bottom:0}.list-group-numbered{list-style-type:none;counter-reset:section}.list-group-numbered>.list-group-item::before{content:counters(section, ".") ". 
";counter-increment:section}.list-group-item-action{width:100%;color:var(--bs-list-group-action-color);text-align:inherit}.list-group-item-action:hover,.list-group-item-action:focus{z-index:1;color:var(--bs-list-group-action-hover-color);text-decoration:none;background-color:var(--bs-list-group-action-hover-bg)}.list-group-item-action:active{color:var(--bs-list-group-action-active-color);background-color:var(--bs-list-group-action-active-bg)}.list-group-item{position:relative;display:block;padding:var(--bs-list-group-item-padding-y) var(--bs-list-group-item-padding-x);color:var(--bs-list-group-color);text-decoration:none;-webkit-text-decoration:none;-moz-text-decoration:none;-ms-text-decoration:none;-o-text-decoration:none;background-color:var(--bs-list-group-bg);border:var(--bs-list-group-border-width) solid var(--bs-list-group-border-color)}.list-group-item.disabled,.list-group-item:disabled{color:var(--bs-list-group-disabled-color);pointer-events:none;background-color:var(--bs-list-group-disabled-bg)}.list-group-item.active{z-index:2;color:var(--bs-list-group-active-color);background-color:var(--bs-list-group-active-bg);border-color:var(--bs-list-group-active-border-color)}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:calc(-1*var(--bs-list-group-border-width));border-top-width:var(--bs-list-group-border-width)}.list-group-horizontal{flex-direction:row;-webkit-flex-direction:row}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}@media(min-width: 576px){.list-group-horizontal-sm{flex-direction:row;-webkit-flex-direction:row}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media(min-width: 768px){.list-group-horizontal-md{flex-direction:row;-webkit-flex-direction:row}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media(min-width: 992px){.list-group-horizontal-lg{flex-direction:row;-webkit-flex-direction:row}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media(min-width: 
1200px){.list-group-horizontal-xl{flex-direction:row;-webkit-flex-direction:row}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media(min-width: 1400px){.list-group-horizontal-xxl{flex-direction:row;-webkit-flex-direction:row}.list-group-horizontal-xxl>.list-group-item.active{margin-top:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item.active{margin-left:calc(-1*var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}.list-group-flush>.list-group-item{border-width:0 0 var(--bs-list-group-border-width)}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-default{--bs-list-group-color: var(--bs-default-text-emphasis);--bs-list-group-bg: var(--bs-default-bg-subtle);--bs-list-group-border-color: var(--bs-default-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-default-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-default-border-subtle);--bs-list-group-active-color: var(--bs-default-bg-subtle);--bs-list-group-active-bg: var(--bs-default-text-emphasis);--bs-list-group-active-border-color: var(--bs-default-text-emphasis)}.list-group-item-primary{--bs-list-group-color: var(--bs-primary-text-emphasis);--bs-list-group-bg: var(--bs-primary-bg-subtle);--bs-list-group-border-color: var(--bs-primary-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-primary-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-primary-border-subtle);--bs-list-group-active-color: var(--bs-primary-bg-subtle);--bs-list-group-active-bg: var(--bs-primary-text-emphasis);--bs-list-group-active-border-color: var(--bs-primary-text-emphasis)}.list-group-item-secondary{--bs-list-group-color: var(--bs-secondary-text-emphasis);--bs-list-group-bg: var(--bs-secondary-bg-subtle);--bs-list-group-border-color: var(--bs-secondary-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-secondary-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-secondary-border-subtle);--bs-list-group-active-color: var(--bs-secondary-bg-subtle);--bs-list-group-active-bg: var(--bs-secondary-text-emphasis);--bs-list-group-active-border-color: var(--bs-secondary-text-emphasis)}.list-group-item-success{--bs-list-group-color: var(--bs-success-text-emphasis);--bs-list-group-bg: var(--bs-success-bg-subtle);--bs-list-group-border-color: var(--bs-success-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-success-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-success-border-subtle);--bs-list-group-active-color: var(--bs-success-bg-subtle);--bs-list-group-active-bg: 
var(--bs-success-text-emphasis);--bs-list-group-active-border-color: var(--bs-success-text-emphasis)}.list-group-item-info{--bs-list-group-color: var(--bs-info-text-emphasis);--bs-list-group-bg: var(--bs-info-bg-subtle);--bs-list-group-border-color: var(--bs-info-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-info-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-info-border-subtle);--bs-list-group-active-color: var(--bs-info-bg-subtle);--bs-list-group-active-bg: var(--bs-info-text-emphasis);--bs-list-group-active-border-color: var(--bs-info-text-emphasis)}.list-group-item-warning{--bs-list-group-color: var(--bs-warning-text-emphasis);--bs-list-group-bg: var(--bs-warning-bg-subtle);--bs-list-group-border-color: var(--bs-warning-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-warning-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-warning-border-subtle);--bs-list-group-active-color: var(--bs-warning-bg-subtle);--bs-list-group-active-bg: var(--bs-warning-text-emphasis);--bs-list-group-active-border-color: var(--bs-warning-text-emphasis)}.list-group-item-danger{--bs-list-group-color: var(--bs-danger-text-emphasis);--bs-list-group-bg: var(--bs-danger-bg-subtle);--bs-list-group-border-color: var(--bs-danger-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-danger-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-danger-border-subtle);--bs-list-group-active-color: var(--bs-danger-bg-subtle);--bs-list-group-active-bg: var(--bs-danger-text-emphasis);--bs-list-group-active-border-color: var(--bs-danger-text-emphasis)}.list-group-item-light{--bs-list-group-color: var(--bs-light-text-emphasis);--bs-list-group-bg: var(--bs-light-bg-subtle);--bs-list-group-border-color: var(--bs-light-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-light-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-light-border-subtle);--bs-list-group-active-color: var(--bs-light-bg-subtle);--bs-list-group-active-bg: var(--bs-light-text-emphasis);--bs-list-group-active-border-color: var(--bs-light-text-emphasis)}.list-group-item-dark{--bs-list-group-color: var(--bs-dark-text-emphasis);--bs-list-group-bg: var(--bs-dark-bg-subtle);--bs-list-group-border-color: var(--bs-dark-border-subtle);--bs-list-group-action-hover-color: var(--bs-emphasis-color);--bs-list-group-action-hover-bg: var(--bs-dark-border-subtle);--bs-list-group-action-active-color: var(--bs-emphasis-color);--bs-list-group-action-active-bg: var(--bs-dark-border-subtle);--bs-list-group-active-color: var(--bs-dark-bg-subtle);--bs-list-group-active-bg: var(--bs-dark-text-emphasis);--bs-list-group-active-border-color: var(--bs-dark-text-emphasis)}.btn-close{--bs-btn-close-color: #000;--bs-btn-close-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 0 1 1.414 0L8 6.586 14.293.293a1 1 0 1 1 1.414 1.414L9.414 8l6.293 6.293a1 1 0 0 1-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 0 1-1.414-1.414L6.586 8 .293 1.707a1 1 0 0 1 
0-1.414z'/%3e%3c/svg%3e");--bs-btn-close-opacity: 0.5;--bs-btn-close-hover-opacity: 0.75;--bs-btn-close-focus-shadow: 0 0 0 0.25rem rgba(39, 128, 227, 0.25);--bs-btn-close-focus-opacity: 1;--bs-btn-close-disabled-opacity: 0.25;--bs-btn-close-white-filter: invert(1) grayscale(100%) brightness(200%);box-sizing:content-box;width:1em;height:1em;padding:.25em .25em;color:var(--bs-btn-close-color);background:rgba(0,0,0,0) var(--bs-btn-close-bg) center/1em auto no-repeat;border:0;opacity:var(--bs-btn-close-opacity)}.btn-close:hover{color:var(--bs-btn-close-color);text-decoration:none;opacity:var(--bs-btn-close-hover-opacity)}.btn-close:focus{outline:0;box-shadow:var(--bs-btn-close-focus-shadow);opacity:var(--bs-btn-close-focus-opacity)}.btn-close:disabled,.btn-close.disabled{pointer-events:none;user-select:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;opacity:var(--bs-btn-close-disabled-opacity)}.btn-close-white{filter:var(--bs-btn-close-white-filter)}[data-bs-theme=dark] .btn-close{filter:var(--bs-btn-close-white-filter)}.toast{--bs-toast-zindex: 1090;--bs-toast-padding-x: 0.75rem;--bs-toast-padding-y: 0.5rem;--bs-toast-spacing: 1.5rem;--bs-toast-max-width: 350px;--bs-toast-font-size:0.875rem;--bs-toast-color: ;--bs-toast-bg: rgba(255, 255, 255, 0.85);--bs-toast-border-width: 1px;--bs-toast-border-color: rgba(0, 0, 0, 0.175);--bs-toast-border-radius: 0.25rem;--bs-toast-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15);--bs-toast-header-color: rgba(52, 58, 64, 0.75);--bs-toast-header-bg: rgba(255, 255, 255, 0.85);--bs-toast-header-border-color: rgba(0, 0, 0, 0.175);width:var(--bs-toast-max-width);max-width:100%;font-size:var(--bs-toast-font-size);color:var(--bs-toast-color);pointer-events:auto;background-color:var(--bs-toast-bg);background-clip:padding-box;border:var(--bs-toast-border-width) solid var(--bs-toast-border-color);box-shadow:var(--bs-toast-box-shadow)}.toast.showing{opacity:0}.toast:not(.show){display:none}.toast-container{--bs-toast-zindex: 1090;position:absolute;z-index:var(--bs-toast-zindex);width:max-content;width:-webkit-max-content;width:-moz-max-content;width:-ms-max-content;width:-o-max-content;max-width:100%;pointer-events:none}.toast-container>:not(:last-child){margin-bottom:var(--bs-toast-spacing)}.toast-header{display:flex;display:-webkit-flex;align-items:center;-webkit-align-items:center;padding:var(--bs-toast-padding-y) var(--bs-toast-padding-x);color:var(--bs-toast-header-color);background-color:var(--bs-toast-header-bg);background-clip:padding-box;border-bottom:var(--bs-toast-border-width) solid var(--bs-toast-header-border-color)}.toast-header .btn-close{margin-right:calc(-0.5*var(--bs-toast-padding-x));margin-left:var(--bs-toast-padding-x)}.toast-body{padding:var(--bs-toast-padding-x);word-wrap:break-word}.modal{--bs-modal-zindex: 1055;--bs-modal-width: 500px;--bs-modal-padding: 1rem;--bs-modal-margin: 0.5rem;--bs-modal-color: ;--bs-modal-bg: #fff;--bs-modal-border-color: rgba(0, 0, 0, 0.175);--bs-modal-border-width: 1px;--bs-modal-border-radius: 0.5rem;--bs-modal-box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);--bs-modal-inner-border-radius: calc(0.5rem - 1px);--bs-modal-header-padding-x: 1rem;--bs-modal-header-padding-y: 1rem;--bs-modal-header-padding: 1rem 1rem;--bs-modal-header-border-color: #dee2e6;--bs-modal-header-border-width: 1px;--bs-modal-title-line-height: 1.5;--bs-modal-footer-gap: 0.5rem;--bs-modal-footer-bg: ;--bs-modal-footer-border-color: #dee2e6;--bs-modal-footer-border-width: 
1px;position:fixed;top:0;left:0;z-index:var(--bs-modal-zindex);display:none;width:100%;height:100%;overflow-x:hidden;overflow-y:auto;outline:0}.modal-dialog{position:relative;width:auto;margin:var(--bs-modal-margin);pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translate(0, -50px)}@media(prefers-reduced-motion: reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{height:calc(100% - var(--bs-modal-margin)*2)}.modal-dialog-scrollable .modal-content{max-height:100%;overflow:hidden}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;display:-webkit-flex;align-items:center;-webkit-align-items:center;min-height:calc(100% - var(--bs-modal-margin)*2)}.modal-content{position:relative;display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;width:100%;color:var(--bs-modal-color);pointer-events:auto;background-color:var(--bs-modal-bg);background-clip:padding-box;border:var(--bs-modal-border-width) solid var(--bs-modal-border-color);outline:0}.modal-backdrop{--bs-backdrop-zindex: 1050;--bs-backdrop-bg: #000;--bs-backdrop-opacity: 0.5;position:fixed;top:0;left:0;z-index:var(--bs-backdrop-zindex);width:100vw;height:100vh;background-color:var(--bs-backdrop-bg)}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:var(--bs-backdrop-opacity)}.modal-header{display:flex;display:-webkit-flex;flex-shrink:0;-webkit-flex-shrink:0;align-items:center;-webkit-align-items:center;justify-content:space-between;-webkit-justify-content:space-between;padding:var(--bs-modal-header-padding);border-bottom:var(--bs-modal-header-border-width) solid var(--bs-modal-header-border-color)}.modal-header .btn-close{padding:calc(var(--bs-modal-header-padding-y)*.5) calc(var(--bs-modal-header-padding-x)*.5);margin:calc(-0.5*var(--bs-modal-header-padding-y)) calc(-0.5*var(--bs-modal-header-padding-x)) calc(-0.5*var(--bs-modal-header-padding-y)) auto}.modal-title{margin-bottom:0;line-height:var(--bs-modal-title-line-height)}.modal-body{position:relative;flex:1 1 auto;-webkit-flex:1 1 auto;padding:var(--bs-modal-padding)}.modal-footer{display:flex;display:-webkit-flex;flex-shrink:0;-webkit-flex-shrink:0;flex-wrap:wrap;-webkit-flex-wrap:wrap;align-items:center;-webkit-align-items:center;justify-content:flex-end;-webkit-justify-content:flex-end;padding:calc(var(--bs-modal-padding) - var(--bs-modal-footer-gap)*.5);background-color:var(--bs-modal-footer-bg);border-top:var(--bs-modal-footer-border-width) solid var(--bs-modal-footer-border-color)}.modal-footer>*{margin:calc(var(--bs-modal-footer-gap)*.5)}@media(min-width: 576px){.modal{--bs-modal-margin: 1.75rem;--bs-modal-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15)}.modal-dialog{max-width:var(--bs-modal-width);margin-right:auto;margin-left:auto}.modal-sm{--bs-modal-width: 300px}}@media(min-width: 992px){.modal-lg,.modal-xl{--bs-modal-width: 800px}}@media(min-width: 1200px){.modal-xl{--bs-modal-width: 1140px}}.modal-fullscreen{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen .modal-content{height:100%;border:0}.modal-fullscreen .modal-body{overflow-y:auto}@media(max-width: 575.98px){.modal-fullscreen-sm-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-sm-down .modal-content{height:100%;border:0}.modal-fullscreen-sm-down .modal-body{overflow-y:auto}}@media(max-width: 
767.98px){.modal-fullscreen-md-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-md-down .modal-content{height:100%;border:0}.modal-fullscreen-md-down .modal-body{overflow-y:auto}}@media(max-width: 991.98px){.modal-fullscreen-lg-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-lg-down .modal-content{height:100%;border:0}.modal-fullscreen-lg-down .modal-body{overflow-y:auto}}@media(max-width: 1199.98px){.modal-fullscreen-xl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xl-down .modal-content{height:100%;border:0}.modal-fullscreen-xl-down .modal-body{overflow-y:auto}}@media(max-width: 1399.98px){.modal-fullscreen-xxl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xxl-down .modal-content{height:100%;border:0}.modal-fullscreen-xxl-down .modal-body{overflow-y:auto}}.tooltip{--bs-tooltip-zindex: 1080;--bs-tooltip-max-width: 200px;--bs-tooltip-padding-x: 0.5rem;--bs-tooltip-padding-y: 0.25rem;--bs-tooltip-margin: ;--bs-tooltip-font-size:0.875rem;--bs-tooltip-color: #fff;--bs-tooltip-bg: #000;--bs-tooltip-border-radius: 0.25rem;--bs-tooltip-opacity: 0.9;--bs-tooltip-arrow-width: 0.8rem;--bs-tooltip-arrow-height: 0.4rem;z-index:var(--bs-tooltip-zindex);display:block;margin:var(--bs-tooltip-margin);font-family:"Source Sans Pro",-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;white-space:normal;word-spacing:normal;line-break:auto;font-size:var(--bs-tooltip-font-size);word-wrap:break-word;opacity:0}.tooltip.show{opacity:var(--bs-tooltip-opacity)}.tooltip .tooltip-arrow{display:block;width:var(--bs-tooltip-arrow-width);height:var(--bs-tooltip-arrow-height)}.tooltip .tooltip-arrow::before{position:absolute;content:"";border-color:rgba(0,0,0,0);border-style:solid}.bs-tooltip-top .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow{bottom:calc(-1*var(--bs-tooltip-arrow-height))}.bs-tooltip-top .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow::before{top:-1px;border-width:var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width)*.5) 0;border-top-color:var(--bs-tooltip-bg)}.bs-tooltip-end .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow{left:calc(-1*var(--bs-tooltip-arrow-height));width:var(--bs-tooltip-arrow-height);height:var(--bs-tooltip-arrow-width)}.bs-tooltip-end .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow::before{right:-1px;border-width:calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width)*.5) 0;border-right-color:var(--bs-tooltip-bg)}.bs-tooltip-bottom .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow{top:calc(-1*var(--bs-tooltip-arrow-height))}.bs-tooltip-bottom .tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow::before{bottom:-1px;border-width:0 calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height);border-bottom-color:var(--bs-tooltip-bg)}.bs-tooltip-start .tooltip-arrow,.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow{right:calc(-1*var(--bs-tooltip-arrow-height));width:var(--bs-tooltip-arrow-height);height:var(--bs-tooltip-arrow-width)}.bs-tooltip-start 
.tooltip-arrow::before,.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow::before{left:-1px;border-width:calc(var(--bs-tooltip-arrow-width)*.5) 0 calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height);border-left-color:var(--bs-tooltip-bg)}.tooltip-inner{max-width:var(--bs-tooltip-max-width);padding:var(--bs-tooltip-padding-y) var(--bs-tooltip-padding-x);color:var(--bs-tooltip-color);text-align:center;background-color:var(--bs-tooltip-bg)}.popover{--bs-popover-zindex: 1070;--bs-popover-max-width: 276px;--bs-popover-font-size:0.875rem;--bs-popover-bg: #fff;--bs-popover-border-width: 1px;--bs-popover-border-color: rgba(0, 0, 0, 0.175);--bs-popover-border-radius: 0.5rem;--bs-popover-inner-border-radius: calc(0.5rem - 1px);--bs-popover-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15);--bs-popover-header-padding-x: 1rem;--bs-popover-header-padding-y: 0.5rem;--bs-popover-header-font-size:1rem;--bs-popover-header-color: inherit;--bs-popover-header-bg: #e9ecef;--bs-popover-body-padding-x: 1rem;--bs-popover-body-padding-y: 1rem;--bs-popover-body-color: #343a40;--bs-popover-arrow-width: 1rem;--bs-popover-arrow-height: 0.5rem;--bs-popover-arrow-border: var(--bs-popover-border-color);z-index:var(--bs-popover-zindex);display:block;max-width:var(--bs-popover-max-width);font-family:"Source Sans Pro",-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;white-space:normal;word-spacing:normal;line-break:auto;font-size:var(--bs-popover-font-size);word-wrap:break-word;background-color:var(--bs-popover-bg);background-clip:padding-box;border:var(--bs-popover-border-width) solid var(--bs-popover-border-color)}.popover .popover-arrow{display:block;width:var(--bs-popover-arrow-width);height:var(--bs-popover-arrow-height)}.popover .popover-arrow::before,.popover .popover-arrow::after{position:absolute;display:block;content:"";border-color:rgba(0,0,0,0);border-style:solid;border-width:0}.bs-popover-top>.popover-arrow,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow{bottom:calc(-1*(var(--bs-popover-arrow-height)) - var(--bs-popover-border-width))}.bs-popover-top>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::before,.bs-popover-top>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::after{border-width:var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width)*.5) 0}.bs-popover-top>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::before{bottom:0;border-top-color:var(--bs-popover-arrow-border)}.bs-popover-top>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::after{bottom:var(--bs-popover-border-width);border-top-color:var(--bs-popover-bg)}.bs-popover-end>.popover-arrow,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow{left:calc(-1*(var(--bs-popover-arrow-height)) - var(--bs-popover-border-width));width:var(--bs-popover-arrow-height);height:var(--bs-popover-arrow-width)}.bs-popover-end>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::before,.bs-popover-end>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::after{border-width:calc(var(--bs-popover-arrow-width)*.5) var(--bs-popover-arrow-height) 
calc(var(--bs-popover-arrow-width)*.5) 0}.bs-popover-end>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::before{left:0;border-right-color:var(--bs-popover-arrow-border)}.bs-popover-end>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::after{left:var(--bs-popover-border-width);border-right-color:var(--bs-popover-bg)}.bs-popover-bottom>.popover-arrow,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow{top:calc(-1*(var(--bs-popover-arrow-height)) - var(--bs-popover-border-width))}.bs-popover-bottom>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::before,.bs-popover-bottom>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::after{border-width:0 calc(var(--bs-popover-arrow-width)*.5) var(--bs-popover-arrow-height)}.bs-popover-bottom>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::before{top:0;border-bottom-color:var(--bs-popover-arrow-border)}.bs-popover-bottom>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::after{top:var(--bs-popover-border-width);border-bottom-color:var(--bs-popover-bg)}.bs-popover-bottom .popover-header::before,.bs-popover-auto[data-popper-placement^=bottom] .popover-header::before{position:absolute;top:0;left:50%;display:block;width:var(--bs-popover-arrow-width);margin-left:calc(-0.5*var(--bs-popover-arrow-width));content:"";border-bottom:var(--bs-popover-border-width) solid var(--bs-popover-header-bg)}.bs-popover-start>.popover-arrow,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow{right:calc(-1*(var(--bs-popover-arrow-height)) - var(--bs-popover-border-width));width:var(--bs-popover-arrow-height);height:var(--bs-popover-arrow-width)}.bs-popover-start>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::before,.bs-popover-start>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::after{border-width:calc(var(--bs-popover-arrow-width)*.5) 0 calc(var(--bs-popover-arrow-width)*.5) var(--bs-popover-arrow-height)}.bs-popover-start>.popover-arrow::before,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::before{right:0;border-left-color:var(--bs-popover-arrow-border)}.bs-popover-start>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::after{right:var(--bs-popover-border-width);border-left-color:var(--bs-popover-bg)}.popover-header{padding:var(--bs-popover-header-padding-y) var(--bs-popover-header-padding-x);margin-bottom:0;font-size:var(--bs-popover-header-font-size);color:var(--bs-popover-header-color);background-color:var(--bs-popover-header-bg);border-bottom:var(--bs-popover-border-width) solid var(--bs-popover-border-color)}.popover-header:empty{display:none}.popover-body{padding:var(--bs-popover-body-padding-y) var(--bs-popover-body-padding-x);color:var(--bs-popover-body-color)}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y;-webkit-touch-action:pan-y;-moz-touch-action:pan-y;-ms-touch-action:pan-y;-o-touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner::after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;backface-visibility:hidden;-webkit-backface-visibility:hidden;-moz-backface-visibility:hidden;-ms-backface-visibility:hidden;-o-backface-visibility:hidden;transition:transform .6s 
ease-in-out}@media(prefers-reduced-motion: reduce){.carousel-item{transition:none}}.carousel-item.active,.carousel-item-next,.carousel-item-prev{display:block}.carousel-item-next:not(.carousel-item-start),.active.carousel-item-end{transform:translateX(100%)}.carousel-item-prev:not(.carousel-item-end),.active.carousel-item-start{transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item.active,.carousel-fade .carousel-item-next.carousel-item-start,.carousel-fade .carousel-item-prev.carousel-item-end{z-index:1;opacity:1}.carousel-fade .active.carousel-item-start,.carousel-fade .active.carousel-item-end{z-index:0;opacity:0;transition:opacity 0s .6s}@media(prefers-reduced-motion: reduce){.carousel-fade .active.carousel-item-start,.carousel-fade .active.carousel-item-end{transition:none}}.carousel-control-prev,.carousel-control-next{position:absolute;top:0;bottom:0;z-index:1;display:flex;display:-webkit-flex;align-items:center;-webkit-align-items:center;justify-content:center;-webkit-justify-content:center;width:15%;padding:0;color:#fff;text-align:center;background:none;border:0;opacity:.5;transition:opacity .15s ease}@media(prefers-reduced-motion: reduce){.carousel-control-prev,.carousel-control-next{transition:none}}.carousel-control-prev:hover,.carousel-control-prev:focus,.carousel-control-next:hover,.carousel-control-next:focus{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-prev-icon,.carousel-control-next-icon{display:inline-block;width:2rem;height:2rem;background-repeat:no-repeat;background-position:50%;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")}.carousel-control-next-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:2;display:flex;display:-webkit-flex;justify-content:center;-webkit-justify-content:center;padding:0;margin-right:15%;margin-bottom:1rem;margin-left:15%}.carousel-indicators [data-bs-target]{box-sizing:content-box;flex:0 1 auto;-webkit-flex:0 1 auto;width:30px;height:3px;padding:0;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border:0;border-top:10px solid rgba(0,0,0,0);border-bottom:10px solid rgba(0,0,0,0);opacity:.5;transition:opacity .6s ease}@media(prefers-reduced-motion: reduce){.carousel-indicators [data-bs-target]{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:1.25rem;left:15%;padding-top:1.25rem;padding-bottom:1.25rem;color:#fff;text-align:center}.carousel-dark .carousel-control-prev-icon,.carousel-dark .carousel-control-next-icon{filter:invert(1) grayscale(100)}.carousel-dark .carousel-indicators [data-bs-target]{background-color:#000}.carousel-dark .carousel-caption{color:#000}[data-bs-theme=dark] .carousel .carousel-control-prev-icon,[data-bs-theme=dark] .carousel 
.carousel-control-next-icon,[data-bs-theme=dark].carousel .carousel-control-prev-icon,[data-bs-theme=dark].carousel .carousel-control-next-icon{filter:invert(1) grayscale(100)}[data-bs-theme=dark] .carousel .carousel-indicators [data-bs-target],[data-bs-theme=dark].carousel .carousel-indicators [data-bs-target]{background-color:#000}[data-bs-theme=dark] .carousel .carousel-caption,[data-bs-theme=dark].carousel .carousel-caption{color:#000}.spinner-grow,.spinner-border{display:inline-block;width:var(--bs-spinner-width);height:var(--bs-spinner-height);vertical-align:var(--bs-spinner-vertical-align);border-radius:50%;animation:var(--bs-spinner-animation-speed) linear infinite var(--bs-spinner-animation-name)}@keyframes spinner-border{to{transform:rotate(360deg) /* rtl:ignore */}}.spinner-border{--bs-spinner-width: 2rem;--bs-spinner-height: 2rem;--bs-spinner-vertical-align: -0.125em;--bs-spinner-border-width: 0.25em;--bs-spinner-animation-speed: 0.75s;--bs-spinner-animation-name: spinner-border;border:var(--bs-spinner-border-width) solid currentcolor;border-right-color:rgba(0,0,0,0)}.spinner-border-sm{--bs-spinner-width: 1rem;--bs-spinner-height: 1rem;--bs-spinner-border-width: 0.2em}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{--bs-spinner-width: 2rem;--bs-spinner-height: 2rem;--bs-spinner-vertical-align: -0.125em;--bs-spinner-animation-speed: 0.75s;--bs-spinner-animation-name: spinner-grow;background-color:currentcolor;opacity:0}.spinner-grow-sm{--bs-spinner-width: 1rem;--bs-spinner-height: 1rem}@media(prefers-reduced-motion: reduce){.spinner-border,.spinner-grow{--bs-spinner-animation-speed: 1.5s}}.offcanvas,.offcanvas-xxl,.offcanvas-xl,.offcanvas-lg,.offcanvas-md,.offcanvas-sm{--bs-offcanvas-zindex: 1045;--bs-offcanvas-width: 400px;--bs-offcanvas-height: 30vh;--bs-offcanvas-padding-x: 1rem;--bs-offcanvas-padding-y: 1rem;--bs-offcanvas-color: #343a40;--bs-offcanvas-bg: #fff;--bs-offcanvas-border-width: 1px;--bs-offcanvas-border-color: rgba(0, 0, 0, 0.175);--bs-offcanvas-box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);--bs-offcanvas-transition: transform 0.3s ease-in-out;--bs-offcanvas-title-line-height: 1.5}@media(max-width: 575.98px){.offcanvas-sm{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 575.98px)and (prefers-reduced-motion: reduce){.offcanvas-sm{transition:none}}@media(max-width: 575.98px){.offcanvas-sm.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-sm.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-sm.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-sm.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid 
var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-sm.showing,.offcanvas-sm.show:not(.hiding){transform:none}.offcanvas-sm.showing,.offcanvas-sm.hiding,.offcanvas-sm.show{visibility:visible}}@media(min-width: 576px){.offcanvas-sm{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-sm .offcanvas-header{display:none}.offcanvas-sm .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}@media(max-width: 767.98px){.offcanvas-md{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 767.98px)and (prefers-reduced-motion: reduce){.offcanvas-md{transition:none}}@media(max-width: 767.98px){.offcanvas-md.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-md.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-md.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-md.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-md.showing,.offcanvas-md.show:not(.hiding){transform:none}.offcanvas-md.showing,.offcanvas-md.hiding,.offcanvas-md.show{visibility:visible}}@media(min-width: 768px){.offcanvas-md{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-md .offcanvas-header{display:none}.offcanvas-md .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}@media(max-width: 991.98px){.offcanvas-lg{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 991.98px)and (prefers-reduced-motion: reduce){.offcanvas-lg{transition:none}}@media(max-width: 991.98px){.offcanvas-lg.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-lg.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-lg.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid 
var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-lg.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-lg.showing,.offcanvas-lg.show:not(.hiding){transform:none}.offcanvas-lg.showing,.offcanvas-lg.hiding,.offcanvas-lg.show{visibility:visible}}@media(min-width: 992px){.offcanvas-lg{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-lg .offcanvas-header{display:none}.offcanvas-lg .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}@media(max-width: 1199.98px){.offcanvas-xl{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 1199.98px)and (prefers-reduced-motion: reduce){.offcanvas-xl{transition:none}}@media(max-width: 1199.98px){.offcanvas-xl.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-xl.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-xl.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-xl.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-xl.showing,.offcanvas-xl.show:not(.hiding){transform:none}.offcanvas-xl.showing,.offcanvas-xl.hiding,.offcanvas-xl.show{visibility:visible}}@media(min-width: 1200px){.offcanvas-xl{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-xl .offcanvas-header{display:none}.offcanvas-xl .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}@media(max-width: 1399.98px){.offcanvas-xxl{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media(max-width: 1399.98px)and (prefers-reduced-motion: reduce){.offcanvas-xxl{transition:none}}@media(max-width: 1399.98px){.offcanvas-xxl.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-xxl.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid 
var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-xxl.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-xxl.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-xxl.showing,.offcanvas-xxl.show:not(.hiding){transform:none}.offcanvas-xxl.showing,.offcanvas-xxl.hiding,.offcanvas-xxl.show{visibility:visible}}@media(min-width: 1400px){.offcanvas-xxl{--bs-offcanvas-height: auto;--bs-offcanvas-border-width: 0;background-color:rgba(0,0,0,0) !important}.offcanvas-xxl .offcanvas-header{display:none}.offcanvas-xxl .offcanvas-body{display:flex;display:-webkit-flex;flex-grow:0;-webkit-flex-grow:0;padding:0;overflow-y:visible;background-color:rgba(0,0,0,0) !important}}.offcanvas{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}@media(prefers-reduced-motion: reduce){.offcanvas{transition:none}}.offcanvas.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas.showing,.offcanvas.show:not(.hiding){transform:none}.offcanvas.showing,.offcanvas.hiding,.offcanvas.show{visibility:visible}.offcanvas-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.offcanvas-backdrop.fade{opacity:0}.offcanvas-backdrop.show{opacity:.5}.offcanvas-header{display:flex;display:-webkit-flex;align-items:center;-webkit-align-items:center;justify-content:space-between;-webkit-justify-content:space-between;padding:var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x)}.offcanvas-header .btn-close{padding:calc(var(--bs-offcanvas-padding-y)*.5) calc(var(--bs-offcanvas-padding-x)*.5);margin-top:calc(-0.5*var(--bs-offcanvas-padding-y));margin-right:calc(-0.5*var(--bs-offcanvas-padding-x));margin-bottom:calc(-0.5*var(--bs-offcanvas-padding-y))}.offcanvas-title{margin-bottom:0;line-height:var(--bs-offcanvas-title-line-height)}.offcanvas-body{flex-grow:1;-webkit-flex-grow:1;padding:var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x);overflow-y:auto}.placeholder{display:inline-block;min-height:1em;vertical-align:middle;cursor:wait;background-color:currentcolor;opacity:.5}.placeholder.btn::before{display:inline-block;content:""}.placeholder-xs{min-height:.6em}.placeholder-sm{min-height:.8em}.placeholder-lg{min-height:1.2em}.placeholder-glow 
.placeholder{animation:placeholder-glow 2s ease-in-out infinite}@keyframes placeholder-glow{50%{opacity:.2}}.placeholder-wave{mask-image:linear-gradient(130deg, #000 55%, rgba(0, 0, 0, 0.8) 75%, #000 95%);-webkit-mask-image:linear-gradient(130deg, #000 55%, rgba(0, 0, 0, 0.8) 75%, #000 95%);mask-size:200% 100%;-webkit-mask-size:200% 100%;animation:placeholder-wave 2s linear infinite}@keyframes placeholder-wave{100%{mask-position:-200% 0%;-webkit-mask-position:-200% 0%}}.clearfix::after{display:block;clear:both;content:""}.text-bg-default{color:#fff !important;background-color:RGBA(var(--bs-default-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-primary{color:#fff !important;background-color:RGBA(var(--bs-primary-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-secondary{color:#fff !important;background-color:RGBA(var(--bs-secondary-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-success{color:#fff !important;background-color:RGBA(var(--bs-success-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-info{color:#fff !important;background-color:RGBA(var(--bs-info-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-warning{color:#fff !important;background-color:RGBA(var(--bs-warning-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-danger{color:#fff !important;background-color:RGBA(var(--bs-danger-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-light{color:#000 !important;background-color:RGBA(var(--bs-light-rgb), var(--bs-bg-opacity, 1)) !important}.text-bg-dark{color:#fff !important;background-color:RGBA(var(--bs-dark-rgb), var(--bs-bg-opacity, 1)) !important}.link-default{color:RGBA(var(--bs-default-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-default-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-default:hover,.link-default:focus{color:RGBA(42, 46, 51, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(42, 46, 51, var(--bs-link-underline-opacity, 1)) !important}.link-primary{color:RGBA(var(--bs-primary-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-primary-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-primary:hover,.link-primary:focus{color:RGBA(31, 102, 182, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(31, 102, 182, var(--bs-link-underline-opacity, 1)) !important}.link-secondary{color:RGBA(var(--bs-secondary-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-secondary-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-secondary:hover,.link-secondary:focus{color:RGBA(42, 46, 51, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(42, 46, 51, var(--bs-link-underline-opacity, 1)) !important}.link-success{color:RGBA(var(--bs-success-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-success-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-success:hover,.link-success:focus{color:RGBA(50, 146, 19, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(50, 146, 19, var(--bs-link-underline-opacity, 1)) !important}.link-info{color:RGBA(var(--bs-info-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-info-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-info:hover,.link-info:focus{color:RGBA(122, 67, 150, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(122, 67, 150, var(--bs-link-underline-opacity, 1)) !important}.link-warning{color:RGBA(var(--bs-warning-rgb), var(--bs-link-opacity, 1)) 
!important;text-decoration-color:RGBA(var(--bs-warning-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-warning:hover,.link-warning:focus{color:RGBA(204, 94, 19, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(204, 94, 19, var(--bs-link-underline-opacity, 1)) !important}.link-danger{color:RGBA(var(--bs-danger-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-danger-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-danger:hover,.link-danger:focus{color:RGBA(204, 0, 46, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(204, 0, 46, var(--bs-link-underline-opacity, 1)) !important}.link-light{color:RGBA(var(--bs-light-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-light-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-light:hover,.link-light:focus{color:RGBA(249, 250, 251, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(249, 250, 251, var(--bs-link-underline-opacity, 1)) !important}.link-dark{color:RGBA(var(--bs-dark-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-dark-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-dark:hover,.link-dark:focus{color:RGBA(42, 46, 51, var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(42, 46, 51, var(--bs-link-underline-opacity, 1)) !important}.link-body-emphasis{color:RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-opacity, 1)) !important;text-decoration-color:RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-body-emphasis:hover,.link-body-emphasis:focus{color:RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-opacity, 0.75)) !important;text-decoration-color:RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-underline-opacity, 0.75)) !important}.focus-ring:focus{outline:0;box-shadow:var(--bs-focus-ring-x, 0) var(--bs-focus-ring-y, 0) var(--bs-focus-ring-blur, 0) var(--bs-focus-ring-width) var(--bs-focus-ring-color)}.icon-link{display:inline-flex;gap:.375rem;align-items:center;-webkit-align-items:center;text-decoration-color:rgba(var(--bs-link-color-rgb), var(--bs-link-opacity, 0.5));text-underline-offset:.25em;backface-visibility:hidden;-webkit-backface-visibility:hidden;-moz-backface-visibility:hidden;-ms-backface-visibility:hidden;-o-backface-visibility:hidden}.icon-link>.bi{flex-shrink:0;-webkit-flex-shrink:0;width:1em;height:1em;fill:currentcolor;transition:.2s ease-in-out transform}@media(prefers-reduced-motion: reduce){.icon-link>.bi{transition:none}}.icon-link-hover:hover>.bi,.icon-link-hover:focus-visible>.bi{transform:var(--bs-icon-link-transform, translate3d(0.25em, 0, 0))}.ratio{position:relative;width:100%}.ratio::before{display:block;padding-top:var(--bs-aspect-ratio);content:""}.ratio>*{position:absolute;top:0;left:0;width:100%;height:100%}.ratio-1x1{--bs-aspect-ratio: 100%}.ratio-4x3{--bs-aspect-ratio: 75%}.ratio-16x9{--bs-aspect-ratio: 56.25%}.ratio-21x9{--bs-aspect-ratio: 42.8571428571%}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}.sticky-top{position:sticky;top:0;z-index:1020}.sticky-bottom{position:sticky;bottom:0;z-index:1020}@media(min-width: 576px){.sticky-sm-top{position:sticky;top:0;z-index:1020}.sticky-sm-bottom{position:sticky;bottom:0;z-index:1020}}@media(min-width: 768px){.sticky-md-top{position:sticky;top:0;z-index:1020}.sticky-md-bottom{position:sticky;bottom:0;z-index:1020}}@media(min-width: 
992px){.sticky-lg-top{position:sticky;top:0;z-index:1020}.sticky-lg-bottom{position:sticky;bottom:0;z-index:1020}}@media(min-width: 1200px){.sticky-xl-top{position:sticky;top:0;z-index:1020}.sticky-xl-bottom{position:sticky;bottom:0;z-index:1020}}@media(min-width: 1400px){.sticky-xxl-top{position:sticky;top:0;z-index:1020}.sticky-xxl-bottom{position:sticky;bottom:0;z-index:1020}}.hstack{display:flex;display:-webkit-flex;flex-direction:row;-webkit-flex-direction:row;align-items:center;-webkit-align-items:center;align-self:stretch;-webkit-align-self:stretch}.vstack{display:flex;display:-webkit-flex;flex:1 1 auto;-webkit-flex:1 1 auto;flex-direction:column;-webkit-flex-direction:column;align-self:stretch;-webkit-align-self:stretch}.visually-hidden,.visually-hidden-focusable:not(:focus):not(:focus-within){width:1px !important;height:1px !important;padding:0 !important;margin:-1px !important;overflow:hidden !important;clip:rect(0, 0, 0, 0) !important;white-space:nowrap !important;border:0 !important}.visually-hidden:not(caption),.visually-hidden-focusable:not(:focus):not(:focus-within):not(caption){position:absolute !important}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.vr{display:inline-block;align-self:stretch;-webkit-align-self:stretch;width:1px;min-height:1em;background-color:currentcolor;opacity:.25}.align-baseline{vertical-align:baseline !important}.align-top{vertical-align:top !important}.align-middle{vertical-align:middle !important}.align-bottom{vertical-align:bottom !important}.align-text-bottom{vertical-align:text-bottom !important}.align-text-top{vertical-align:text-top !important}.float-start{float:left !important}.float-end{float:right !important}.float-none{float:none !important}.object-fit-contain{object-fit:contain !important}.object-fit-cover{object-fit:cover !important}.object-fit-fill{object-fit:fill !important}.object-fit-scale{object-fit:scale-down !important}.object-fit-none{object-fit:none !important}.opacity-0{opacity:0 !important}.opacity-25{opacity:.25 !important}.opacity-50{opacity:.5 !important}.opacity-75{opacity:.75 !important}.opacity-100{opacity:1 !important}.overflow-auto{overflow:auto !important}.overflow-hidden{overflow:hidden !important}.overflow-visible{overflow:visible !important}.overflow-scroll{overflow:scroll !important}.overflow-x-auto{overflow-x:auto !important}.overflow-x-hidden{overflow-x:hidden !important}.overflow-x-visible{overflow-x:visible !important}.overflow-x-scroll{overflow-x:scroll !important}.overflow-y-auto{overflow-y:auto !important}.overflow-y-hidden{overflow-y:hidden !important}.overflow-y-visible{overflow-y:visible !important}.overflow-y-scroll{overflow-y:scroll !important}.d-inline{display:inline !important}.d-inline-block{display:inline-block !important}.d-block{display:block !important}.d-grid{display:grid !important}.d-inline-grid{display:inline-grid !important}.d-table{display:table !important}.d-table-row{display:table-row !important}.d-table-cell{display:table-cell !important}.d-flex{display:flex !important}.d-inline-flex{display:inline-flex !important}.d-none{display:none !important}.shadow{box-shadow:0 .5rem 1rem rgba(0,0,0,.15) !important}.shadow-sm{box-shadow:0 .125rem .25rem rgba(0,0,0,.075) !important}.shadow-lg{box-shadow:0 1rem 3rem rgba(0,0,0,.175) !important}.shadow-none{box-shadow:none !important}.focus-ring-default{--bs-focus-ring-color: rgba(var(--bs-default-rgb), 
var(--bs-focus-ring-opacity))}.focus-ring-primary{--bs-focus-ring-color: rgba(var(--bs-primary-rgb), var(--bs-focus-ring-opacity))}.focus-ring-secondary{--bs-focus-ring-color: rgba(var(--bs-secondary-rgb), var(--bs-focus-ring-opacity))}.focus-ring-success{--bs-focus-ring-color: rgba(var(--bs-success-rgb), var(--bs-focus-ring-opacity))}.focus-ring-info{--bs-focus-ring-color: rgba(var(--bs-info-rgb), var(--bs-focus-ring-opacity))}.focus-ring-warning{--bs-focus-ring-color: rgba(var(--bs-warning-rgb), var(--bs-focus-ring-opacity))}.focus-ring-danger{--bs-focus-ring-color: rgba(var(--bs-danger-rgb), var(--bs-focus-ring-opacity))}.focus-ring-light{--bs-focus-ring-color: rgba(var(--bs-light-rgb), var(--bs-focus-ring-opacity))}.focus-ring-dark{--bs-focus-ring-color: rgba(var(--bs-dark-rgb), var(--bs-focus-ring-opacity))}.position-static{position:static !important}.position-relative{position:relative !important}.position-absolute{position:absolute !important}.position-fixed{position:fixed !important}.position-sticky{position:sticky !important}.top-0{top:0 !important}.top-50{top:50% !important}.top-100{top:100% !important}.bottom-0{bottom:0 !important}.bottom-50{bottom:50% !important}.bottom-100{bottom:100% !important}.start-0{left:0 !important}.start-50{left:50% !important}.start-100{left:100% !important}.end-0{right:0 !important}.end-50{right:50% !important}.end-100{right:100% !important}.translate-middle{transform:translate(-50%, -50%) !important}.translate-middle-x{transform:translateX(-50%) !important}.translate-middle-y{transform:translateY(-50%) !important}.border{border:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-0{border:0 !important}.border-top{border-top:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-top-0{border-top:0 !important}.border-end{border-right:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-end-0{border-right:0 !important}.border-bottom{border-bottom:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-bottom-0{border-bottom:0 !important}.border-start{border-left:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important}.border-start-0{border-left:0 !important}.border-default{--bs-border-opacity: 1;border-color:rgba(var(--bs-default-rgb), var(--bs-border-opacity)) !important}.border-primary{--bs-border-opacity: 1;border-color:rgba(var(--bs-primary-rgb), var(--bs-border-opacity)) !important}.border-secondary{--bs-border-opacity: 1;border-color:rgba(var(--bs-secondary-rgb), var(--bs-border-opacity)) !important}.border-success{--bs-border-opacity: 1;border-color:rgba(var(--bs-success-rgb), var(--bs-border-opacity)) !important}.border-info{--bs-border-opacity: 1;border-color:rgba(var(--bs-info-rgb), var(--bs-border-opacity)) !important}.border-warning{--bs-border-opacity: 1;border-color:rgba(var(--bs-warning-rgb), var(--bs-border-opacity)) !important}.border-danger{--bs-border-opacity: 1;border-color:rgba(var(--bs-danger-rgb), var(--bs-border-opacity)) !important}.border-light{--bs-border-opacity: 1;border-color:rgba(var(--bs-light-rgb), var(--bs-border-opacity)) !important}.border-dark{--bs-border-opacity: 1;border-color:rgba(var(--bs-dark-rgb), var(--bs-border-opacity)) !important}.border-black{--bs-border-opacity: 1;border-color:rgba(var(--bs-black-rgb), var(--bs-border-opacity)) !important}.border-white{--bs-border-opacity: 1;border-color:rgba(var(--bs-white-rgb), var(--bs-border-opacity)) 
!important}.border-primary-subtle{border-color:var(--bs-primary-border-subtle) !important}.border-secondary-subtle{border-color:var(--bs-secondary-border-subtle) !important}.border-success-subtle{border-color:var(--bs-success-border-subtle) !important}.border-info-subtle{border-color:var(--bs-info-border-subtle) !important}.border-warning-subtle{border-color:var(--bs-warning-border-subtle) !important}.border-danger-subtle{border-color:var(--bs-danger-border-subtle) !important}.border-light-subtle{border-color:var(--bs-light-border-subtle) !important}.border-dark-subtle{border-color:var(--bs-dark-border-subtle) !important}.border-1{border-width:1px !important}.border-2{border-width:2px !important}.border-3{border-width:3px !important}.border-4{border-width:4px !important}.border-5{border-width:5px !important}.border-opacity-10{--bs-border-opacity: 0.1}.border-opacity-25{--bs-border-opacity: 0.25}.border-opacity-50{--bs-border-opacity: 0.5}.border-opacity-75{--bs-border-opacity: 0.75}.border-opacity-100{--bs-border-opacity: 1}.w-25{width:25% !important}.w-50{width:50% !important}.w-75{width:75% !important}.w-100{width:100% !important}.w-auto{width:auto !important}.mw-100{max-width:100% !important}.vw-100{width:100vw !important}.min-vw-100{min-width:100vw !important}.h-25{height:25% !important}.h-50{height:50% !important}.h-75{height:75% !important}.h-100{height:100% !important}.h-auto{height:auto !important}.mh-100{max-height:100% !important}.vh-100{height:100vh !important}.min-vh-100{min-height:100vh !important}.flex-fill{flex:1 1 auto !important}.flex-row{flex-direction:row !important}.flex-column{flex-direction:column !important}.flex-row-reverse{flex-direction:row-reverse !important}.flex-column-reverse{flex-direction:column-reverse !important}.flex-grow-0{flex-grow:0 !important}.flex-grow-1{flex-grow:1 !important}.flex-shrink-0{flex-shrink:0 !important}.flex-shrink-1{flex-shrink:1 !important}.flex-wrap{flex-wrap:wrap !important}.flex-nowrap{flex-wrap:nowrap !important}.flex-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-start{justify-content:flex-start !important}.justify-content-end{justify-content:flex-end !important}.justify-content-center{justify-content:center !important}.justify-content-between{justify-content:space-between !important}.justify-content-around{justify-content:space-around !important}.justify-content-evenly{justify-content:space-evenly !important}.align-items-start{align-items:flex-start !important}.align-items-end{align-items:flex-end !important}.align-items-center{align-items:center !important}.align-items-baseline{align-items:baseline !important}.align-items-stretch{align-items:stretch !important}.align-content-start{align-content:flex-start !important}.align-content-end{align-content:flex-end !important}.align-content-center{align-content:center !important}.align-content-between{align-content:space-between !important}.align-content-around{align-content:space-around !important}.align-content-stretch{align-content:stretch !important}.align-self-auto{align-self:auto !important}.align-self-start{align-self:flex-start !important}.align-self-end{align-self:flex-end !important}.align-self-center{align-self:center !important}.align-self-baseline{align-self:baseline !important}.align-self-stretch{align-self:stretch !important}.order-first{order:-1 !important}.order-0{order:0 !important}.order-1{order:1 !important}.order-2{order:2 !important}.order-3{order:3 !important}.order-4{order:4 !important}.order-5{order:5 !important}.order-last{order:6 
!important}.m-0{margin:0 !important}.m-1{margin:.25rem !important}.m-2{margin:.5rem !important}.m-3{margin:1rem !important}.m-4{margin:1.5rem !important}.m-5{margin:3rem !important}.m-auto{margin:auto !important}.mx-0{margin-right:0 !important;margin-left:0 !important}.mx-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-3{margin-right:1rem !important;margin-left:1rem !important}.mx-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-5{margin-right:3rem !important;margin-left:3rem !important}.mx-auto{margin-right:auto !important;margin-left:auto !important}.my-0{margin-top:0 !important;margin-bottom:0 !important}.my-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-0{margin-top:0 !important}.mt-1{margin-top:.25rem !important}.mt-2{margin-top:.5rem !important}.mt-3{margin-top:1rem !important}.mt-4{margin-top:1.5rem !important}.mt-5{margin-top:3rem !important}.mt-auto{margin-top:auto !important}.me-0{margin-right:0 !important}.me-1{margin-right:.25rem !important}.me-2{margin-right:.5rem !important}.me-3{margin-right:1rem !important}.me-4{margin-right:1.5rem !important}.me-5{margin-right:3rem !important}.me-auto{margin-right:auto !important}.mb-0{margin-bottom:0 !important}.mb-1{margin-bottom:.25rem !important}.mb-2{margin-bottom:.5rem !important}.mb-3{margin-bottom:1rem !important}.mb-4{margin-bottom:1.5rem !important}.mb-5{margin-bottom:3rem !important}.mb-auto{margin-bottom:auto !important}.ms-0{margin-left:0 !important}.ms-1{margin-left:.25rem !important}.ms-2{margin-left:.5rem !important}.ms-3{margin-left:1rem !important}.ms-4{margin-left:1.5rem !important}.ms-5{margin-left:3rem !important}.ms-auto{margin-left:auto !important}.p-0{padding:0 !important}.p-1{padding:.25rem !important}.p-2{padding:.5rem !important}.p-3{padding:1rem !important}.p-4{padding:1.5rem !important}.p-5{padding:3rem !important}.px-0{padding-right:0 !important;padding-left:0 !important}.px-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-3{padding-right:1rem !important;padding-left:1rem !important}.px-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-5{padding-right:3rem !important;padding-left:3rem !important}.py-0{padding-top:0 !important;padding-bottom:0 !important}.py-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-0{padding-top:0 !important}.pt-1{padding-top:.25rem !important}.pt-2{padding-top:.5rem !important}.pt-3{padding-top:1rem !important}.pt-4{padding-top:1.5rem !important}.pt-5{padding-top:3rem !important}.pe-0{padding-right:0 !important}.pe-1{padding-right:.25rem !important}.pe-2{padding-right:.5rem !important}.pe-3{padding-right:1rem !important}.pe-4{padding-right:1.5rem !important}.pe-5{padding-right:3rem !important}.pb-0{padding-bottom:0 !important}.pb-1{padding-bottom:.25rem 
!important}.pb-2{padding-bottom:.5rem !important}.pb-3{padding-bottom:1rem !important}.pb-4{padding-bottom:1.5rem !important}.pb-5{padding-bottom:3rem !important}.ps-0{padding-left:0 !important}.ps-1{padding-left:.25rem !important}.ps-2{padding-left:.5rem !important}.ps-3{padding-left:1rem !important}.ps-4{padding-left:1.5rem !important}.ps-5{padding-left:3rem !important}.gap-0{gap:0 !important}.gap-1{gap:.25rem !important}.gap-2{gap:.5rem !important}.gap-3{gap:1rem !important}.gap-4{gap:1.5rem !important}.gap-5{gap:3rem !important}.row-gap-0{row-gap:0 !important}.row-gap-1{row-gap:.25rem !important}.row-gap-2{row-gap:.5rem !important}.row-gap-3{row-gap:1rem !important}.row-gap-4{row-gap:1.5rem !important}.row-gap-5{row-gap:3rem !important}.column-gap-0{column-gap:0 !important}.column-gap-1{column-gap:.25rem !important}.column-gap-2{column-gap:.5rem !important}.column-gap-3{column-gap:1rem !important}.column-gap-4{column-gap:1.5rem !important}.column-gap-5{column-gap:3rem !important}.font-monospace{font-family:var(--bs-font-monospace) !important}.fs-1{font-size:calc(1.325rem + 0.9vw) !important}.fs-2{font-size:calc(1.29rem + 0.48vw) !important}.fs-3{font-size:calc(1.27rem + 0.24vw) !important}.fs-4{font-size:1.25rem !important}.fs-5{font-size:1.1rem !important}.fs-6{font-size:1rem !important}.fst-italic{font-style:italic !important}.fst-normal{font-style:normal !important}.fw-lighter{font-weight:lighter !important}.fw-light{font-weight:300 !important}.fw-normal{font-weight:400 !important}.fw-medium{font-weight:500 !important}.fw-semibold{font-weight:600 !important}.fw-bold{font-weight:700 !important}.fw-bolder{font-weight:bolder !important}.lh-1{line-height:1 !important}.lh-sm{line-height:1.25 !important}.lh-base{line-height:1.5 !important}.lh-lg{line-height:2 !important}.text-start{text-align:left !important}.text-end{text-align:right !important}.text-center{text-align:center !important}.text-decoration-none{text-decoration:none !important}.text-decoration-underline{text-decoration:underline !important}.text-decoration-line-through{text-decoration:line-through !important}.text-lowercase{text-transform:lowercase !important}.text-uppercase{text-transform:uppercase !important}.text-capitalize{text-transform:capitalize !important}.text-wrap{white-space:normal !important}.text-nowrap{white-space:nowrap !important}.text-break{word-wrap:break-word !important;word-break:break-word !important}.text-default{--bs-text-opacity: 1;color:rgba(var(--bs-default-rgb), var(--bs-text-opacity)) !important}.text-primary{--bs-text-opacity: 1;color:rgba(var(--bs-primary-rgb), var(--bs-text-opacity)) !important}.text-secondary{--bs-text-opacity: 1;color:rgba(var(--bs-secondary-rgb), var(--bs-text-opacity)) !important}.text-success{--bs-text-opacity: 1;color:rgba(var(--bs-success-rgb), var(--bs-text-opacity)) !important}.text-info{--bs-text-opacity: 1;color:rgba(var(--bs-info-rgb), var(--bs-text-opacity)) !important}.text-warning{--bs-text-opacity: 1;color:rgba(var(--bs-warning-rgb), var(--bs-text-opacity)) !important}.text-danger{--bs-text-opacity: 1;color:rgba(var(--bs-danger-rgb), var(--bs-text-opacity)) !important}.text-light{--bs-text-opacity: 1;color:rgba(var(--bs-light-rgb), var(--bs-text-opacity)) !important}.text-dark{--bs-text-opacity: 1;color:rgba(var(--bs-dark-rgb), var(--bs-text-opacity)) !important}.text-black{--bs-text-opacity: 1;color:rgba(var(--bs-black-rgb), var(--bs-text-opacity)) !important}.text-white{--bs-text-opacity: 1;color:rgba(var(--bs-white-rgb), var(--bs-text-opacity)) 
!important}.text-body{--bs-text-opacity: 1;color:rgba(var(--bs-body-color-rgb), var(--bs-text-opacity)) !important}.text-muted{--bs-text-opacity: 1;color:var(--bs-secondary-color) !important}.text-black-50{--bs-text-opacity: 1;color:rgba(0,0,0,.5) !important}.text-white-50{--bs-text-opacity: 1;color:rgba(255,255,255,.5) !important}.text-body-secondary{--bs-text-opacity: 1;color:var(--bs-secondary-color) !important}.text-body-tertiary{--bs-text-opacity: 1;color:var(--bs-tertiary-color) !important}.text-body-emphasis{--bs-text-opacity: 1;color:var(--bs-emphasis-color) !important}.text-reset{--bs-text-opacity: 1;color:inherit !important}.text-opacity-25{--bs-text-opacity: 0.25}.text-opacity-50{--bs-text-opacity: 0.5}.text-opacity-75{--bs-text-opacity: 0.75}.text-opacity-100{--bs-text-opacity: 1}.text-primary-emphasis{color:var(--bs-primary-text-emphasis) !important}.text-secondary-emphasis{color:var(--bs-secondary-text-emphasis) !important}.text-success-emphasis{color:var(--bs-success-text-emphasis) !important}.text-info-emphasis{color:var(--bs-info-text-emphasis) !important}.text-warning-emphasis{color:var(--bs-warning-text-emphasis) !important}.text-danger-emphasis{color:var(--bs-danger-text-emphasis) !important}.text-light-emphasis{color:var(--bs-light-text-emphasis) !important}.text-dark-emphasis{color:var(--bs-dark-text-emphasis) !important}.link-opacity-10{--bs-link-opacity: 0.1}.link-opacity-10-hover:hover{--bs-link-opacity: 0.1}.link-opacity-25{--bs-link-opacity: 0.25}.link-opacity-25-hover:hover{--bs-link-opacity: 0.25}.link-opacity-50{--bs-link-opacity: 0.5}.link-opacity-50-hover:hover{--bs-link-opacity: 0.5}.link-opacity-75{--bs-link-opacity: 0.75}.link-opacity-75-hover:hover{--bs-link-opacity: 0.75}.link-opacity-100{--bs-link-opacity: 1}.link-opacity-100-hover:hover{--bs-link-opacity: 1}.link-offset-1{text-underline-offset:.125em !important}.link-offset-1-hover:hover{text-underline-offset:.125em !important}.link-offset-2{text-underline-offset:.25em !important}.link-offset-2-hover:hover{text-underline-offset:.25em !important}.link-offset-3{text-underline-offset:.375em !important}.link-offset-3-hover:hover{text-underline-offset:.375em !important}.link-underline-default{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-default-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-primary{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-primary-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-secondary{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-secondary-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-success{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-success-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-info{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-info-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-warning{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-warning-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-danger{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-danger-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-light{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-light-rgb), var(--bs-link-underline-opacity)) !important}.link-underline-dark{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-dark-rgb), var(--bs-link-underline-opacity)) 
!important}.link-underline{--bs-link-underline-opacity: 1;text-decoration-color:rgba(var(--bs-link-color-rgb), var(--bs-link-underline-opacity, 1)) !important}.link-underline-opacity-0{--bs-link-underline-opacity: 0}.link-underline-opacity-0-hover:hover{--bs-link-underline-opacity: 0}.link-underline-opacity-10{--bs-link-underline-opacity: 0.1}.link-underline-opacity-10-hover:hover{--bs-link-underline-opacity: 0.1}.link-underline-opacity-25{--bs-link-underline-opacity: 0.25}.link-underline-opacity-25-hover:hover{--bs-link-underline-opacity: 0.25}.link-underline-opacity-50{--bs-link-underline-opacity: 0.5}.link-underline-opacity-50-hover:hover{--bs-link-underline-opacity: 0.5}.link-underline-opacity-75{--bs-link-underline-opacity: 0.75}.link-underline-opacity-75-hover:hover{--bs-link-underline-opacity: 0.75}.link-underline-opacity-100{--bs-link-underline-opacity: 1}.link-underline-opacity-100-hover:hover{--bs-link-underline-opacity: 1}.bg-default{--bs-bg-opacity: 1;background-color:rgba(var(--bs-default-rgb), var(--bs-bg-opacity)) !important}.bg-primary{--bs-bg-opacity: 1;background-color:rgba(var(--bs-primary-rgb), var(--bs-bg-opacity)) !important}.bg-secondary{--bs-bg-opacity: 1;background-color:rgba(var(--bs-secondary-rgb), var(--bs-bg-opacity)) !important}.bg-success{--bs-bg-opacity: 1;background-color:rgba(var(--bs-success-rgb), var(--bs-bg-opacity)) !important}.bg-info{--bs-bg-opacity: 1;background-color:rgba(var(--bs-info-rgb), var(--bs-bg-opacity)) !important}.bg-warning{--bs-bg-opacity: 1;background-color:rgba(var(--bs-warning-rgb), var(--bs-bg-opacity)) !important}.bg-danger{--bs-bg-opacity: 1;background-color:rgba(var(--bs-danger-rgb), var(--bs-bg-opacity)) !important}.bg-light{--bs-bg-opacity: 1;background-color:rgba(var(--bs-light-rgb), var(--bs-bg-opacity)) !important}.bg-dark{--bs-bg-opacity: 1;background-color:rgba(var(--bs-dark-rgb), var(--bs-bg-opacity)) !important}.bg-black{--bs-bg-opacity: 1;background-color:rgba(var(--bs-black-rgb), var(--bs-bg-opacity)) !important}.bg-white{--bs-bg-opacity: 1;background-color:rgba(var(--bs-white-rgb), var(--bs-bg-opacity)) !important}.bg-body{--bs-bg-opacity: 1;background-color:rgba(var(--bs-body-bg-rgb), var(--bs-bg-opacity)) !important}.bg-transparent{--bs-bg-opacity: 1;background-color:rgba(0,0,0,0) !important}.bg-body-secondary{--bs-bg-opacity: 1;background-color:rgba(var(--bs-secondary-bg-rgb), var(--bs-bg-opacity)) !important}.bg-body-tertiary{--bs-bg-opacity: 1;background-color:rgba(var(--bs-tertiary-bg-rgb), var(--bs-bg-opacity)) !important}.bg-opacity-10{--bs-bg-opacity: 0.1}.bg-opacity-25{--bs-bg-opacity: 0.25}.bg-opacity-50{--bs-bg-opacity: 0.5}.bg-opacity-75{--bs-bg-opacity: 0.75}.bg-opacity-100{--bs-bg-opacity: 1}.bg-primary-subtle{background-color:var(--bs-primary-bg-subtle) !important}.bg-secondary-subtle{background-color:var(--bs-secondary-bg-subtle) !important}.bg-success-subtle{background-color:var(--bs-success-bg-subtle) !important}.bg-info-subtle{background-color:var(--bs-info-bg-subtle) !important}.bg-warning-subtle{background-color:var(--bs-warning-bg-subtle) !important}.bg-danger-subtle{background-color:var(--bs-danger-bg-subtle) !important}.bg-light-subtle{background-color:var(--bs-light-bg-subtle) !important}.bg-dark-subtle{background-color:var(--bs-dark-bg-subtle) !important}.bg-gradient{background-image:var(--bs-gradient) !important}.user-select-all{user-select:all !important}.user-select-auto{user-select:auto !important}.user-select-none{user-select:none !important}.pe-none{pointer-events:none 
!important}.pe-auto{pointer-events:auto !important}.rounded{border-radius:var(--bs-border-radius) !important}.rounded-0{border-radius:0 !important}.rounded-1{border-radius:var(--bs-border-radius-sm) !important}.rounded-2{border-radius:var(--bs-border-radius) !important}.rounded-3{border-radius:var(--bs-border-radius-lg) !important}.rounded-4{border-radius:var(--bs-border-radius-xl) !important}.rounded-5{border-radius:var(--bs-border-radius-xxl) !important}.rounded-circle{border-radius:50% !important}.rounded-pill{border-radius:var(--bs-border-radius-pill) !important}.rounded-top{border-top-left-radius:var(--bs-border-radius) !important;border-top-right-radius:var(--bs-border-radius) !important}.rounded-top-0{border-top-left-radius:0 !important;border-top-right-radius:0 !important}.rounded-top-1{border-top-left-radius:var(--bs-border-radius-sm) !important;border-top-right-radius:var(--bs-border-radius-sm) !important}.rounded-top-2{border-top-left-radius:var(--bs-border-radius) !important;border-top-right-radius:var(--bs-border-radius) !important}.rounded-top-3{border-top-left-radius:var(--bs-border-radius-lg) !important;border-top-right-radius:var(--bs-border-radius-lg) !important}.rounded-top-4{border-top-left-radius:var(--bs-border-radius-xl) !important;border-top-right-radius:var(--bs-border-radius-xl) !important}.rounded-top-5{border-top-left-radius:var(--bs-border-radius-xxl) !important;border-top-right-radius:var(--bs-border-radius-xxl) !important}.rounded-top-circle{border-top-left-radius:50% !important;border-top-right-radius:50% !important}.rounded-top-pill{border-top-left-radius:var(--bs-border-radius-pill) !important;border-top-right-radius:var(--bs-border-radius-pill) !important}.rounded-end{border-top-right-radius:var(--bs-border-radius) !important;border-bottom-right-radius:var(--bs-border-radius) !important}.rounded-end-0{border-top-right-radius:0 !important;border-bottom-right-radius:0 !important}.rounded-end-1{border-top-right-radius:var(--bs-border-radius-sm) !important;border-bottom-right-radius:var(--bs-border-radius-sm) !important}.rounded-end-2{border-top-right-radius:var(--bs-border-radius) !important;border-bottom-right-radius:var(--bs-border-radius) !important}.rounded-end-3{border-top-right-radius:var(--bs-border-radius-lg) !important;border-bottom-right-radius:var(--bs-border-radius-lg) !important}.rounded-end-4{border-top-right-radius:var(--bs-border-radius-xl) !important;border-bottom-right-radius:var(--bs-border-radius-xl) !important}.rounded-end-5{border-top-right-radius:var(--bs-border-radius-xxl) !important;border-bottom-right-radius:var(--bs-border-radius-xxl) !important}.rounded-end-circle{border-top-right-radius:50% !important;border-bottom-right-radius:50% !important}.rounded-end-pill{border-top-right-radius:var(--bs-border-radius-pill) !important;border-bottom-right-radius:var(--bs-border-radius-pill) !important}.rounded-bottom{border-bottom-right-radius:var(--bs-border-radius) !important;border-bottom-left-radius:var(--bs-border-radius) !important}.rounded-bottom-0{border-bottom-right-radius:0 !important;border-bottom-left-radius:0 !important}.rounded-bottom-1{border-bottom-right-radius:var(--bs-border-radius-sm) !important;border-bottom-left-radius:var(--bs-border-radius-sm) !important}.rounded-bottom-2{border-bottom-right-radius:var(--bs-border-radius) !important;border-bottom-left-radius:var(--bs-border-radius) !important}.rounded-bottom-3{border-bottom-right-radius:var(--bs-border-radius-lg) 
!important;border-bottom-left-radius:var(--bs-border-radius-lg) !important}.rounded-bottom-4{border-bottom-right-radius:var(--bs-border-radius-xl) !important;border-bottom-left-radius:var(--bs-border-radius-xl) !important}.rounded-bottom-5{border-bottom-right-radius:var(--bs-border-radius-xxl) !important;border-bottom-left-radius:var(--bs-border-radius-xxl) !important}.rounded-bottom-circle{border-bottom-right-radius:50% !important;border-bottom-left-radius:50% !important}.rounded-bottom-pill{border-bottom-right-radius:var(--bs-border-radius-pill) !important;border-bottom-left-radius:var(--bs-border-radius-pill) !important}.rounded-start{border-bottom-left-radius:var(--bs-border-radius) !important;border-top-left-radius:var(--bs-border-radius) !important}.rounded-start-0{border-bottom-left-radius:0 !important;border-top-left-radius:0 !important}.rounded-start-1{border-bottom-left-radius:var(--bs-border-radius-sm) !important;border-top-left-radius:var(--bs-border-radius-sm) !important}.rounded-start-2{border-bottom-left-radius:var(--bs-border-radius) !important;border-top-left-radius:var(--bs-border-radius) !important}.rounded-start-3{border-bottom-left-radius:var(--bs-border-radius-lg) !important;border-top-left-radius:var(--bs-border-radius-lg) !important}.rounded-start-4{border-bottom-left-radius:var(--bs-border-radius-xl) !important;border-top-left-radius:var(--bs-border-radius-xl) !important}.rounded-start-5{border-bottom-left-radius:var(--bs-border-radius-xxl) !important;border-top-left-radius:var(--bs-border-radius-xxl) !important}.rounded-start-circle{border-bottom-left-radius:50% !important;border-top-left-radius:50% !important}.rounded-start-pill{border-bottom-left-radius:var(--bs-border-radius-pill) !important;border-top-left-radius:var(--bs-border-radius-pill) !important}.visible{visibility:visible !important}.invisible{visibility:hidden !important}.z-n1{z-index:-1 !important}.z-0{z-index:0 !important}.z-1{z-index:1 !important}.z-2{z-index:2 !important}.z-3{z-index:3 !important}@media(min-width: 576px){.float-sm-start{float:left !important}.float-sm-end{float:right !important}.float-sm-none{float:none !important}.object-fit-sm-contain{object-fit:contain !important}.object-fit-sm-cover{object-fit:cover !important}.object-fit-sm-fill{object-fit:fill !important}.object-fit-sm-scale{object-fit:scale-down !important}.object-fit-sm-none{object-fit:none !important}.d-sm-inline{display:inline !important}.d-sm-inline-block{display:inline-block !important}.d-sm-block{display:block !important}.d-sm-grid{display:grid !important}.d-sm-inline-grid{display:inline-grid !important}.d-sm-table{display:table !important}.d-sm-table-row{display:table-row !important}.d-sm-table-cell{display:table-cell !important}.d-sm-flex{display:flex !important}.d-sm-inline-flex{display:inline-flex !important}.d-sm-none{display:none !important}.flex-sm-fill{flex:1 1 auto !important}.flex-sm-row{flex-direction:row !important}.flex-sm-column{flex-direction:column !important}.flex-sm-row-reverse{flex-direction:row-reverse !important}.flex-sm-column-reverse{flex-direction:column-reverse !important}.flex-sm-grow-0{flex-grow:0 !important}.flex-sm-grow-1{flex-grow:1 !important}.flex-sm-shrink-0{flex-shrink:0 !important}.flex-sm-shrink-1{flex-shrink:1 !important}.flex-sm-wrap{flex-wrap:wrap !important}.flex-sm-nowrap{flex-wrap:nowrap !important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-sm-start{justify-content:flex-start !important}.justify-content-sm-end{justify-content:flex-end 
!important}.justify-content-sm-center{justify-content:center !important}.justify-content-sm-between{justify-content:space-between !important}.justify-content-sm-around{justify-content:space-around !important}.justify-content-sm-evenly{justify-content:space-evenly !important}.align-items-sm-start{align-items:flex-start !important}.align-items-sm-end{align-items:flex-end !important}.align-items-sm-center{align-items:center !important}.align-items-sm-baseline{align-items:baseline !important}.align-items-sm-stretch{align-items:stretch !important}.align-content-sm-start{align-content:flex-start !important}.align-content-sm-end{align-content:flex-end !important}.align-content-sm-center{align-content:center !important}.align-content-sm-between{align-content:space-between !important}.align-content-sm-around{align-content:space-around !important}.align-content-sm-stretch{align-content:stretch !important}.align-self-sm-auto{align-self:auto !important}.align-self-sm-start{align-self:flex-start !important}.align-self-sm-end{align-self:flex-end !important}.align-self-sm-center{align-self:center !important}.align-self-sm-baseline{align-self:baseline !important}.align-self-sm-stretch{align-self:stretch !important}.order-sm-first{order:-1 !important}.order-sm-0{order:0 !important}.order-sm-1{order:1 !important}.order-sm-2{order:2 !important}.order-sm-3{order:3 !important}.order-sm-4{order:4 !important}.order-sm-5{order:5 !important}.order-sm-last{order:6 !important}.m-sm-0{margin:0 !important}.m-sm-1{margin:.25rem !important}.m-sm-2{margin:.5rem !important}.m-sm-3{margin:1rem !important}.m-sm-4{margin:1.5rem !important}.m-sm-5{margin:3rem !important}.m-sm-auto{margin:auto !important}.mx-sm-0{margin-right:0 !important;margin-left:0 !important}.mx-sm-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-sm-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-sm-3{margin-right:1rem !important;margin-left:1rem !important}.mx-sm-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-sm-5{margin-right:3rem !important;margin-left:3rem !important}.mx-sm-auto{margin-right:auto !important;margin-left:auto !important}.my-sm-0{margin-top:0 !important;margin-bottom:0 !important}.my-sm-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-sm-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-sm-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-sm-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-sm-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-sm-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-sm-0{margin-top:0 !important}.mt-sm-1{margin-top:.25rem !important}.mt-sm-2{margin-top:.5rem !important}.mt-sm-3{margin-top:1rem !important}.mt-sm-4{margin-top:1.5rem !important}.mt-sm-5{margin-top:3rem !important}.mt-sm-auto{margin-top:auto !important}.me-sm-0{margin-right:0 !important}.me-sm-1{margin-right:.25rem !important}.me-sm-2{margin-right:.5rem !important}.me-sm-3{margin-right:1rem !important}.me-sm-4{margin-right:1.5rem !important}.me-sm-5{margin-right:3rem !important}.me-sm-auto{margin-right:auto !important}.mb-sm-0{margin-bottom:0 !important}.mb-sm-1{margin-bottom:.25rem !important}.mb-sm-2{margin-bottom:.5rem !important}.mb-sm-3{margin-bottom:1rem !important}.mb-sm-4{margin-bottom:1.5rem !important}.mb-sm-5{margin-bottom:3rem !important}.mb-sm-auto{margin-bottom:auto !important}.ms-sm-0{margin-left:0 !important}.ms-sm-1{margin-left:.25rem !important}.ms-sm-2{margin-left:.5rem 
!important}.ms-sm-3{margin-left:1rem !important}.ms-sm-4{margin-left:1.5rem !important}.ms-sm-5{margin-left:3rem !important}.ms-sm-auto{margin-left:auto !important}.p-sm-0{padding:0 !important}.p-sm-1{padding:.25rem !important}.p-sm-2{padding:.5rem !important}.p-sm-3{padding:1rem !important}.p-sm-4{padding:1.5rem !important}.p-sm-5{padding:3rem !important}.px-sm-0{padding-right:0 !important;padding-left:0 !important}.px-sm-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-sm-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-sm-3{padding-right:1rem !important;padding-left:1rem !important}.px-sm-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-sm-5{padding-right:3rem !important;padding-left:3rem !important}.py-sm-0{padding-top:0 !important;padding-bottom:0 !important}.py-sm-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-sm-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-sm-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-sm-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-sm-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-sm-0{padding-top:0 !important}.pt-sm-1{padding-top:.25rem !important}.pt-sm-2{padding-top:.5rem !important}.pt-sm-3{padding-top:1rem !important}.pt-sm-4{padding-top:1.5rem !important}.pt-sm-5{padding-top:3rem !important}.pe-sm-0{padding-right:0 !important}.pe-sm-1{padding-right:.25rem !important}.pe-sm-2{padding-right:.5rem !important}.pe-sm-3{padding-right:1rem !important}.pe-sm-4{padding-right:1.5rem !important}.pe-sm-5{padding-right:3rem !important}.pb-sm-0{padding-bottom:0 !important}.pb-sm-1{padding-bottom:.25rem !important}.pb-sm-2{padding-bottom:.5rem !important}.pb-sm-3{padding-bottom:1rem !important}.pb-sm-4{padding-bottom:1.5rem !important}.pb-sm-5{padding-bottom:3rem !important}.ps-sm-0{padding-left:0 !important}.ps-sm-1{padding-left:.25rem !important}.ps-sm-2{padding-left:.5rem !important}.ps-sm-3{padding-left:1rem !important}.ps-sm-4{padding-left:1.5rem !important}.ps-sm-5{padding-left:3rem !important}.gap-sm-0{gap:0 !important}.gap-sm-1{gap:.25rem !important}.gap-sm-2{gap:.5rem !important}.gap-sm-3{gap:1rem !important}.gap-sm-4{gap:1.5rem !important}.gap-sm-5{gap:3rem !important}.row-gap-sm-0{row-gap:0 !important}.row-gap-sm-1{row-gap:.25rem !important}.row-gap-sm-2{row-gap:.5rem !important}.row-gap-sm-3{row-gap:1rem !important}.row-gap-sm-4{row-gap:1.5rem !important}.row-gap-sm-5{row-gap:3rem !important}.column-gap-sm-0{column-gap:0 !important}.column-gap-sm-1{column-gap:.25rem !important}.column-gap-sm-2{column-gap:.5rem !important}.column-gap-sm-3{column-gap:1rem !important}.column-gap-sm-4{column-gap:1.5rem !important}.column-gap-sm-5{column-gap:3rem !important}.text-sm-start{text-align:left !important}.text-sm-end{text-align:right !important}.text-sm-center{text-align:center !important}}@media(min-width: 768px){.float-md-start{float:left !important}.float-md-end{float:right !important}.float-md-none{float:none !important}.object-fit-md-contain{object-fit:contain !important}.object-fit-md-cover{object-fit:cover !important}.object-fit-md-fill{object-fit:fill !important}.object-fit-md-scale{object-fit:scale-down !important}.object-fit-md-none{object-fit:none !important}.d-md-inline{display:inline !important}.d-md-inline-block{display:inline-block !important}.d-md-block{display:block !important}.d-md-grid{display:grid !important}.d-md-inline-grid{display:inline-grid !important}.d-md-table{display:table 
!important}.d-md-table-row{display:table-row !important}.d-md-table-cell{display:table-cell !important}.d-md-flex{display:flex !important}.d-md-inline-flex{display:inline-flex !important}.d-md-none{display:none !important}.flex-md-fill{flex:1 1 auto !important}.flex-md-row{flex-direction:row !important}.flex-md-column{flex-direction:column !important}.flex-md-row-reverse{flex-direction:row-reverse !important}.flex-md-column-reverse{flex-direction:column-reverse !important}.flex-md-grow-0{flex-grow:0 !important}.flex-md-grow-1{flex-grow:1 !important}.flex-md-shrink-0{flex-shrink:0 !important}.flex-md-shrink-1{flex-shrink:1 !important}.flex-md-wrap{flex-wrap:wrap !important}.flex-md-nowrap{flex-wrap:nowrap !important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-md-start{justify-content:flex-start !important}.justify-content-md-end{justify-content:flex-end !important}.justify-content-md-center{justify-content:center !important}.justify-content-md-between{justify-content:space-between !important}.justify-content-md-around{justify-content:space-around !important}.justify-content-md-evenly{justify-content:space-evenly !important}.align-items-md-start{align-items:flex-start !important}.align-items-md-end{align-items:flex-end !important}.align-items-md-center{align-items:center !important}.align-items-md-baseline{align-items:baseline !important}.align-items-md-stretch{align-items:stretch !important}.align-content-md-start{align-content:flex-start !important}.align-content-md-end{align-content:flex-end !important}.align-content-md-center{align-content:center !important}.align-content-md-between{align-content:space-between !important}.align-content-md-around{align-content:space-around !important}.align-content-md-stretch{align-content:stretch !important}.align-self-md-auto{align-self:auto !important}.align-self-md-start{align-self:flex-start !important}.align-self-md-end{align-self:flex-end !important}.align-self-md-center{align-self:center !important}.align-self-md-baseline{align-self:baseline !important}.align-self-md-stretch{align-self:stretch !important}.order-md-first{order:-1 !important}.order-md-0{order:0 !important}.order-md-1{order:1 !important}.order-md-2{order:2 !important}.order-md-3{order:3 !important}.order-md-4{order:4 !important}.order-md-5{order:5 !important}.order-md-last{order:6 !important}.m-md-0{margin:0 !important}.m-md-1{margin:.25rem !important}.m-md-2{margin:.5rem !important}.m-md-3{margin:1rem !important}.m-md-4{margin:1.5rem !important}.m-md-5{margin:3rem !important}.m-md-auto{margin:auto !important}.mx-md-0{margin-right:0 !important;margin-left:0 !important}.mx-md-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-md-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-md-3{margin-right:1rem !important;margin-left:1rem !important}.mx-md-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-md-5{margin-right:3rem !important;margin-left:3rem !important}.mx-md-auto{margin-right:auto !important;margin-left:auto !important}.my-md-0{margin-top:0 !important;margin-bottom:0 !important}.my-md-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-md-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-md-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-md-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-md-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-md-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-md-0{margin-top:0 
!important}.mt-md-1{margin-top:.25rem !important}.mt-md-2{margin-top:.5rem !important}.mt-md-3{margin-top:1rem !important}.mt-md-4{margin-top:1.5rem !important}.mt-md-5{margin-top:3rem !important}.mt-md-auto{margin-top:auto !important}.me-md-0{margin-right:0 !important}.me-md-1{margin-right:.25rem !important}.me-md-2{margin-right:.5rem !important}.me-md-3{margin-right:1rem !important}.me-md-4{margin-right:1.5rem !important}.me-md-5{margin-right:3rem !important}.me-md-auto{margin-right:auto !important}.mb-md-0{margin-bottom:0 !important}.mb-md-1{margin-bottom:.25rem !important}.mb-md-2{margin-bottom:.5rem !important}.mb-md-3{margin-bottom:1rem !important}.mb-md-4{margin-bottom:1.5rem !important}.mb-md-5{margin-bottom:3rem !important}.mb-md-auto{margin-bottom:auto !important}.ms-md-0{margin-left:0 !important}.ms-md-1{margin-left:.25rem !important}.ms-md-2{margin-left:.5rem !important}.ms-md-3{margin-left:1rem !important}.ms-md-4{margin-left:1.5rem !important}.ms-md-5{margin-left:3rem !important}.ms-md-auto{margin-left:auto !important}.p-md-0{padding:0 !important}.p-md-1{padding:.25rem !important}.p-md-2{padding:.5rem !important}.p-md-3{padding:1rem !important}.p-md-4{padding:1.5rem !important}.p-md-5{padding:3rem !important}.px-md-0{padding-right:0 !important;padding-left:0 !important}.px-md-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-md-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-md-3{padding-right:1rem !important;padding-left:1rem !important}.px-md-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-md-5{padding-right:3rem !important;padding-left:3rem !important}.py-md-0{padding-top:0 !important;padding-bottom:0 !important}.py-md-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-md-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-md-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-md-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-md-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-md-0{padding-top:0 !important}.pt-md-1{padding-top:.25rem !important}.pt-md-2{padding-top:.5rem !important}.pt-md-3{padding-top:1rem !important}.pt-md-4{padding-top:1.5rem !important}.pt-md-5{padding-top:3rem !important}.pe-md-0{padding-right:0 !important}.pe-md-1{padding-right:.25rem !important}.pe-md-2{padding-right:.5rem !important}.pe-md-3{padding-right:1rem !important}.pe-md-4{padding-right:1.5rem !important}.pe-md-5{padding-right:3rem !important}.pb-md-0{padding-bottom:0 !important}.pb-md-1{padding-bottom:.25rem !important}.pb-md-2{padding-bottom:.5rem !important}.pb-md-3{padding-bottom:1rem !important}.pb-md-4{padding-bottom:1.5rem !important}.pb-md-5{padding-bottom:3rem !important}.ps-md-0{padding-left:0 !important}.ps-md-1{padding-left:.25rem !important}.ps-md-2{padding-left:.5rem !important}.ps-md-3{padding-left:1rem !important}.ps-md-4{padding-left:1.5rem !important}.ps-md-5{padding-left:3rem !important}.gap-md-0{gap:0 !important}.gap-md-1{gap:.25rem !important}.gap-md-2{gap:.5rem !important}.gap-md-3{gap:1rem !important}.gap-md-4{gap:1.5rem !important}.gap-md-5{gap:3rem !important}.row-gap-md-0{row-gap:0 !important}.row-gap-md-1{row-gap:.25rem !important}.row-gap-md-2{row-gap:.5rem !important}.row-gap-md-3{row-gap:1rem !important}.row-gap-md-4{row-gap:1.5rem !important}.row-gap-md-5{row-gap:3rem !important}.column-gap-md-0{column-gap:0 !important}.column-gap-md-1{column-gap:.25rem !important}.column-gap-md-2{column-gap:.5rem 
!important}.column-gap-md-3{column-gap:1rem !important}.column-gap-md-4{column-gap:1.5rem !important}.column-gap-md-5{column-gap:3rem !important}.text-md-start{text-align:left !important}.text-md-end{text-align:right !important}.text-md-center{text-align:center !important}}@media(min-width: 992px){.float-lg-start{float:left !important}.float-lg-end{float:right !important}.float-lg-none{float:none !important}.object-fit-lg-contain{object-fit:contain !important}.object-fit-lg-cover{object-fit:cover !important}.object-fit-lg-fill{object-fit:fill !important}.object-fit-lg-scale{object-fit:scale-down !important}.object-fit-lg-none{object-fit:none !important}.d-lg-inline{display:inline !important}.d-lg-inline-block{display:inline-block !important}.d-lg-block{display:block !important}.d-lg-grid{display:grid !important}.d-lg-inline-grid{display:inline-grid !important}.d-lg-table{display:table !important}.d-lg-table-row{display:table-row !important}.d-lg-table-cell{display:table-cell !important}.d-lg-flex{display:flex !important}.d-lg-inline-flex{display:inline-flex !important}.d-lg-none{display:none !important}.flex-lg-fill{flex:1 1 auto !important}.flex-lg-row{flex-direction:row !important}.flex-lg-column{flex-direction:column !important}.flex-lg-row-reverse{flex-direction:row-reverse !important}.flex-lg-column-reverse{flex-direction:column-reverse !important}.flex-lg-grow-0{flex-grow:0 !important}.flex-lg-grow-1{flex-grow:1 !important}.flex-lg-shrink-0{flex-shrink:0 !important}.flex-lg-shrink-1{flex-shrink:1 !important}.flex-lg-wrap{flex-wrap:wrap !important}.flex-lg-nowrap{flex-wrap:nowrap !important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-lg-start{justify-content:flex-start !important}.justify-content-lg-end{justify-content:flex-end !important}.justify-content-lg-center{justify-content:center !important}.justify-content-lg-between{justify-content:space-between !important}.justify-content-lg-around{justify-content:space-around !important}.justify-content-lg-evenly{justify-content:space-evenly !important}.align-items-lg-start{align-items:flex-start !important}.align-items-lg-end{align-items:flex-end !important}.align-items-lg-center{align-items:center !important}.align-items-lg-baseline{align-items:baseline !important}.align-items-lg-stretch{align-items:stretch !important}.align-content-lg-start{align-content:flex-start !important}.align-content-lg-end{align-content:flex-end !important}.align-content-lg-center{align-content:center !important}.align-content-lg-between{align-content:space-between !important}.align-content-lg-around{align-content:space-around !important}.align-content-lg-stretch{align-content:stretch !important}.align-self-lg-auto{align-self:auto !important}.align-self-lg-start{align-self:flex-start !important}.align-self-lg-end{align-self:flex-end !important}.align-self-lg-center{align-self:center !important}.align-self-lg-baseline{align-self:baseline !important}.align-self-lg-stretch{align-self:stretch !important}.order-lg-first{order:-1 !important}.order-lg-0{order:0 !important}.order-lg-1{order:1 !important}.order-lg-2{order:2 !important}.order-lg-3{order:3 !important}.order-lg-4{order:4 !important}.order-lg-5{order:5 !important}.order-lg-last{order:6 !important}.m-lg-0{margin:0 !important}.m-lg-1{margin:.25rem !important}.m-lg-2{margin:.5rem !important}.m-lg-3{margin:1rem !important}.m-lg-4{margin:1.5rem !important}.m-lg-5{margin:3rem !important}.m-lg-auto{margin:auto !important}.mx-lg-0{margin-right:0 !important;margin-left:0 
!important}.mx-lg-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-lg-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-lg-3{margin-right:1rem !important;margin-left:1rem !important}.mx-lg-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-lg-5{margin-right:3rem !important;margin-left:3rem !important}.mx-lg-auto{margin-right:auto !important;margin-left:auto !important}.my-lg-0{margin-top:0 !important;margin-bottom:0 !important}.my-lg-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-lg-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-lg-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-lg-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-lg-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-lg-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-lg-0{margin-top:0 !important}.mt-lg-1{margin-top:.25rem !important}.mt-lg-2{margin-top:.5rem !important}.mt-lg-3{margin-top:1rem !important}.mt-lg-4{margin-top:1.5rem !important}.mt-lg-5{margin-top:3rem !important}.mt-lg-auto{margin-top:auto !important}.me-lg-0{margin-right:0 !important}.me-lg-1{margin-right:.25rem !important}.me-lg-2{margin-right:.5rem !important}.me-lg-3{margin-right:1rem !important}.me-lg-4{margin-right:1.5rem !important}.me-lg-5{margin-right:3rem !important}.me-lg-auto{margin-right:auto !important}.mb-lg-0{margin-bottom:0 !important}.mb-lg-1{margin-bottom:.25rem !important}.mb-lg-2{margin-bottom:.5rem !important}.mb-lg-3{margin-bottom:1rem !important}.mb-lg-4{margin-bottom:1.5rem !important}.mb-lg-5{margin-bottom:3rem !important}.mb-lg-auto{margin-bottom:auto !important}.ms-lg-0{margin-left:0 !important}.ms-lg-1{margin-left:.25rem !important}.ms-lg-2{margin-left:.5rem !important}.ms-lg-3{margin-left:1rem !important}.ms-lg-4{margin-left:1.5rem !important}.ms-lg-5{margin-left:3rem !important}.ms-lg-auto{margin-left:auto !important}.p-lg-0{padding:0 !important}.p-lg-1{padding:.25rem !important}.p-lg-2{padding:.5rem !important}.p-lg-3{padding:1rem !important}.p-lg-4{padding:1.5rem !important}.p-lg-5{padding:3rem !important}.px-lg-0{padding-right:0 !important;padding-left:0 !important}.px-lg-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-lg-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-lg-3{padding-right:1rem !important;padding-left:1rem !important}.px-lg-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-lg-5{padding-right:3rem !important;padding-left:3rem !important}.py-lg-0{padding-top:0 !important;padding-bottom:0 !important}.py-lg-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-lg-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-lg-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-lg-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-lg-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-lg-0{padding-top:0 !important}.pt-lg-1{padding-top:.25rem !important}.pt-lg-2{padding-top:.5rem !important}.pt-lg-3{padding-top:1rem !important}.pt-lg-4{padding-top:1.5rem !important}.pt-lg-5{padding-top:3rem !important}.pe-lg-0{padding-right:0 !important}.pe-lg-1{padding-right:.25rem !important}.pe-lg-2{padding-right:.5rem !important}.pe-lg-3{padding-right:1rem !important}.pe-lg-4{padding-right:1.5rem !important}.pe-lg-5{padding-right:3rem !important}.pb-lg-0{padding-bottom:0 !important}.pb-lg-1{padding-bottom:.25rem !important}.pb-lg-2{padding-bottom:.5rem 
!important}.pb-lg-3{padding-bottom:1rem !important}.pb-lg-4{padding-bottom:1.5rem !important}.pb-lg-5{padding-bottom:3rem !important}.ps-lg-0{padding-left:0 !important}.ps-lg-1{padding-left:.25rem !important}.ps-lg-2{padding-left:.5rem !important}.ps-lg-3{padding-left:1rem !important}.ps-lg-4{padding-left:1.5rem !important}.ps-lg-5{padding-left:3rem !important}.gap-lg-0{gap:0 !important}.gap-lg-1{gap:.25rem !important}.gap-lg-2{gap:.5rem !important}.gap-lg-3{gap:1rem !important}.gap-lg-4{gap:1.5rem !important}.gap-lg-5{gap:3rem !important}.row-gap-lg-0{row-gap:0 !important}.row-gap-lg-1{row-gap:.25rem !important}.row-gap-lg-2{row-gap:.5rem !important}.row-gap-lg-3{row-gap:1rem !important}.row-gap-lg-4{row-gap:1.5rem !important}.row-gap-lg-5{row-gap:3rem !important}.column-gap-lg-0{column-gap:0 !important}.column-gap-lg-1{column-gap:.25rem !important}.column-gap-lg-2{column-gap:.5rem !important}.column-gap-lg-3{column-gap:1rem !important}.column-gap-lg-4{column-gap:1.5rem !important}.column-gap-lg-5{column-gap:3rem !important}.text-lg-start{text-align:left !important}.text-lg-end{text-align:right !important}.text-lg-center{text-align:center !important}}@media(min-width: 1200px){.float-xl-start{float:left !important}.float-xl-end{float:right !important}.float-xl-none{float:none !important}.object-fit-xl-contain{object-fit:contain !important}.object-fit-xl-cover{object-fit:cover !important}.object-fit-xl-fill{object-fit:fill !important}.object-fit-xl-scale{object-fit:scale-down !important}.object-fit-xl-none{object-fit:none !important}.d-xl-inline{display:inline !important}.d-xl-inline-block{display:inline-block !important}.d-xl-block{display:block !important}.d-xl-grid{display:grid !important}.d-xl-inline-grid{display:inline-grid !important}.d-xl-table{display:table !important}.d-xl-table-row{display:table-row !important}.d-xl-table-cell{display:table-cell !important}.d-xl-flex{display:flex !important}.d-xl-inline-flex{display:inline-flex !important}.d-xl-none{display:none !important}.flex-xl-fill{flex:1 1 auto !important}.flex-xl-row{flex-direction:row !important}.flex-xl-column{flex-direction:column !important}.flex-xl-row-reverse{flex-direction:row-reverse !important}.flex-xl-column-reverse{flex-direction:column-reverse !important}.flex-xl-grow-0{flex-grow:0 !important}.flex-xl-grow-1{flex-grow:1 !important}.flex-xl-shrink-0{flex-shrink:0 !important}.flex-xl-shrink-1{flex-shrink:1 !important}.flex-xl-wrap{flex-wrap:wrap !important}.flex-xl-nowrap{flex-wrap:nowrap !important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse !important}.justify-content-xl-start{justify-content:flex-start !important}.justify-content-xl-end{justify-content:flex-end !important}.justify-content-xl-center{justify-content:center !important}.justify-content-xl-between{justify-content:space-between !important}.justify-content-xl-around{justify-content:space-around !important}.justify-content-xl-evenly{justify-content:space-evenly !important}.align-items-xl-start{align-items:flex-start !important}.align-items-xl-end{align-items:flex-end !important}.align-items-xl-center{align-items:center !important}.align-items-xl-baseline{align-items:baseline !important}.align-items-xl-stretch{align-items:stretch !important}.align-content-xl-start{align-content:flex-start !important}.align-content-xl-end{align-content:flex-end !important}.align-content-xl-center{align-content:center !important}.align-content-xl-between{align-content:space-between !important}.align-content-xl-around{align-content:space-around 
!important}.align-content-xl-stretch{align-content:stretch !important}.align-self-xl-auto{align-self:auto !important}.align-self-xl-start{align-self:flex-start !important}.align-self-xl-end{align-self:flex-end !important}.align-self-xl-center{align-self:center !important}.align-self-xl-baseline{align-self:baseline !important}.align-self-xl-stretch{align-self:stretch !important}.order-xl-first{order:-1 !important}.order-xl-0{order:0 !important}.order-xl-1{order:1 !important}.order-xl-2{order:2 !important}.order-xl-3{order:3 !important}.order-xl-4{order:4 !important}.order-xl-5{order:5 !important}.order-xl-last{order:6 !important}.m-xl-0{margin:0 !important}.m-xl-1{margin:.25rem !important}.m-xl-2{margin:.5rem !important}.m-xl-3{margin:1rem !important}.m-xl-4{margin:1.5rem !important}.m-xl-5{margin:3rem !important}.m-xl-auto{margin:auto !important}.mx-xl-0{margin-right:0 !important;margin-left:0 !important}.mx-xl-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-xl-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-xl-3{margin-right:1rem !important;margin-left:1rem !important}.mx-xl-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-xl-5{margin-right:3rem !important;margin-left:3rem !important}.mx-xl-auto{margin-right:auto !important;margin-left:auto !important}.my-xl-0{margin-top:0 !important;margin-bottom:0 !important}.my-xl-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-xl-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-xl-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-xl-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-xl-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-xl-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-xl-0{margin-top:0 !important}.mt-xl-1{margin-top:.25rem !important}.mt-xl-2{margin-top:.5rem !important}.mt-xl-3{margin-top:1rem !important}.mt-xl-4{margin-top:1.5rem !important}.mt-xl-5{margin-top:3rem !important}.mt-xl-auto{margin-top:auto !important}.me-xl-0{margin-right:0 !important}.me-xl-1{margin-right:.25rem !important}.me-xl-2{margin-right:.5rem !important}.me-xl-3{margin-right:1rem !important}.me-xl-4{margin-right:1.5rem !important}.me-xl-5{margin-right:3rem !important}.me-xl-auto{margin-right:auto !important}.mb-xl-0{margin-bottom:0 !important}.mb-xl-1{margin-bottom:.25rem !important}.mb-xl-2{margin-bottom:.5rem !important}.mb-xl-3{margin-bottom:1rem !important}.mb-xl-4{margin-bottom:1.5rem !important}.mb-xl-5{margin-bottom:3rem !important}.mb-xl-auto{margin-bottom:auto !important}.ms-xl-0{margin-left:0 !important}.ms-xl-1{margin-left:.25rem !important}.ms-xl-2{margin-left:.5rem !important}.ms-xl-3{margin-left:1rem !important}.ms-xl-4{margin-left:1.5rem !important}.ms-xl-5{margin-left:3rem !important}.ms-xl-auto{margin-left:auto !important}.p-xl-0{padding:0 !important}.p-xl-1{padding:.25rem !important}.p-xl-2{padding:.5rem !important}.p-xl-3{padding:1rem !important}.p-xl-4{padding:1.5rem !important}.p-xl-5{padding:3rem !important}.px-xl-0{padding-right:0 !important;padding-left:0 !important}.px-xl-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-xl-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-xl-3{padding-right:1rem !important;padding-left:1rem !important}.px-xl-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-xl-5{padding-right:3rem !important;padding-left:3rem !important}.py-xl-0{padding-top:0 !important;padding-bottom:0 
!important}.py-xl-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-xl-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-xl-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-xl-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-xl-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-xl-0{padding-top:0 !important}.pt-xl-1{padding-top:.25rem !important}.pt-xl-2{padding-top:.5rem !important}.pt-xl-3{padding-top:1rem !important}.pt-xl-4{padding-top:1.5rem !important}.pt-xl-5{padding-top:3rem !important}.pe-xl-0{padding-right:0 !important}.pe-xl-1{padding-right:.25rem !important}.pe-xl-2{padding-right:.5rem !important}.pe-xl-3{padding-right:1rem !important}.pe-xl-4{padding-right:1.5rem !important}.pe-xl-5{padding-right:3rem !important}.pb-xl-0{padding-bottom:0 !important}.pb-xl-1{padding-bottom:.25rem !important}.pb-xl-2{padding-bottom:.5rem !important}.pb-xl-3{padding-bottom:1rem !important}.pb-xl-4{padding-bottom:1.5rem !important}.pb-xl-5{padding-bottom:3rem !important}.ps-xl-0{padding-left:0 !important}.ps-xl-1{padding-left:.25rem !important}.ps-xl-2{padding-left:.5rem !important}.ps-xl-3{padding-left:1rem !important}.ps-xl-4{padding-left:1.5rem !important}.ps-xl-5{padding-left:3rem !important}.gap-xl-0{gap:0 !important}.gap-xl-1{gap:.25rem !important}.gap-xl-2{gap:.5rem !important}.gap-xl-3{gap:1rem !important}.gap-xl-4{gap:1.5rem !important}.gap-xl-5{gap:3rem !important}.row-gap-xl-0{row-gap:0 !important}.row-gap-xl-1{row-gap:.25rem !important}.row-gap-xl-2{row-gap:.5rem !important}.row-gap-xl-3{row-gap:1rem !important}.row-gap-xl-4{row-gap:1.5rem !important}.row-gap-xl-5{row-gap:3rem !important}.column-gap-xl-0{column-gap:0 !important}.column-gap-xl-1{column-gap:.25rem !important}.column-gap-xl-2{column-gap:.5rem !important}.column-gap-xl-3{column-gap:1rem !important}.column-gap-xl-4{column-gap:1.5rem !important}.column-gap-xl-5{column-gap:3rem !important}.text-xl-start{text-align:left !important}.text-xl-end{text-align:right !important}.text-xl-center{text-align:center !important}}@media(min-width: 1400px){.float-xxl-start{float:left !important}.float-xxl-end{float:right !important}.float-xxl-none{float:none !important}.object-fit-xxl-contain{object-fit:contain !important}.object-fit-xxl-cover{object-fit:cover !important}.object-fit-xxl-fill{object-fit:fill !important}.object-fit-xxl-scale{object-fit:scale-down !important}.object-fit-xxl-none{object-fit:none !important}.d-xxl-inline{display:inline !important}.d-xxl-inline-block{display:inline-block !important}.d-xxl-block{display:block !important}.d-xxl-grid{display:grid !important}.d-xxl-inline-grid{display:inline-grid !important}.d-xxl-table{display:table !important}.d-xxl-table-row{display:table-row !important}.d-xxl-table-cell{display:table-cell !important}.d-xxl-flex{display:flex !important}.d-xxl-inline-flex{display:inline-flex !important}.d-xxl-none{display:none !important}.flex-xxl-fill{flex:1 1 auto !important}.flex-xxl-row{flex-direction:row !important}.flex-xxl-column{flex-direction:column !important}.flex-xxl-row-reverse{flex-direction:row-reverse !important}.flex-xxl-column-reverse{flex-direction:column-reverse !important}.flex-xxl-grow-0{flex-grow:0 !important}.flex-xxl-grow-1{flex-grow:1 !important}.flex-xxl-shrink-0{flex-shrink:0 !important}.flex-xxl-shrink-1{flex-shrink:1 !important}.flex-xxl-wrap{flex-wrap:wrap !important}.flex-xxl-nowrap{flex-wrap:nowrap !important}.flex-xxl-wrap-reverse{flex-wrap:wrap-reverse 
!important}.justify-content-xxl-start{justify-content:flex-start !important}.justify-content-xxl-end{justify-content:flex-end !important}.justify-content-xxl-center{justify-content:center !important}.justify-content-xxl-between{justify-content:space-between !important}.justify-content-xxl-around{justify-content:space-around !important}.justify-content-xxl-evenly{justify-content:space-evenly !important}.align-items-xxl-start{align-items:flex-start !important}.align-items-xxl-end{align-items:flex-end !important}.align-items-xxl-center{align-items:center !important}.align-items-xxl-baseline{align-items:baseline !important}.align-items-xxl-stretch{align-items:stretch !important}.align-content-xxl-start{align-content:flex-start !important}.align-content-xxl-end{align-content:flex-end !important}.align-content-xxl-center{align-content:center !important}.align-content-xxl-between{align-content:space-between !important}.align-content-xxl-around{align-content:space-around !important}.align-content-xxl-stretch{align-content:stretch !important}.align-self-xxl-auto{align-self:auto !important}.align-self-xxl-start{align-self:flex-start !important}.align-self-xxl-end{align-self:flex-end !important}.align-self-xxl-center{align-self:center !important}.align-self-xxl-baseline{align-self:baseline !important}.align-self-xxl-stretch{align-self:stretch !important}.order-xxl-first{order:-1 !important}.order-xxl-0{order:0 !important}.order-xxl-1{order:1 !important}.order-xxl-2{order:2 !important}.order-xxl-3{order:3 !important}.order-xxl-4{order:4 !important}.order-xxl-5{order:5 !important}.order-xxl-last{order:6 !important}.m-xxl-0{margin:0 !important}.m-xxl-1{margin:.25rem !important}.m-xxl-2{margin:.5rem !important}.m-xxl-3{margin:1rem !important}.m-xxl-4{margin:1.5rem !important}.m-xxl-5{margin:3rem !important}.m-xxl-auto{margin:auto !important}.mx-xxl-0{margin-right:0 !important;margin-left:0 !important}.mx-xxl-1{margin-right:.25rem !important;margin-left:.25rem !important}.mx-xxl-2{margin-right:.5rem !important;margin-left:.5rem !important}.mx-xxl-3{margin-right:1rem !important;margin-left:1rem !important}.mx-xxl-4{margin-right:1.5rem !important;margin-left:1.5rem !important}.mx-xxl-5{margin-right:3rem !important;margin-left:3rem !important}.mx-xxl-auto{margin-right:auto !important;margin-left:auto !important}.my-xxl-0{margin-top:0 !important;margin-bottom:0 !important}.my-xxl-1{margin-top:.25rem !important;margin-bottom:.25rem !important}.my-xxl-2{margin-top:.5rem !important;margin-bottom:.5rem !important}.my-xxl-3{margin-top:1rem !important;margin-bottom:1rem !important}.my-xxl-4{margin-top:1.5rem !important;margin-bottom:1.5rem !important}.my-xxl-5{margin-top:3rem !important;margin-bottom:3rem !important}.my-xxl-auto{margin-top:auto !important;margin-bottom:auto !important}.mt-xxl-0{margin-top:0 !important}.mt-xxl-1{margin-top:.25rem !important}.mt-xxl-2{margin-top:.5rem !important}.mt-xxl-3{margin-top:1rem !important}.mt-xxl-4{margin-top:1.5rem !important}.mt-xxl-5{margin-top:3rem !important}.mt-xxl-auto{margin-top:auto !important}.me-xxl-0{margin-right:0 !important}.me-xxl-1{margin-right:.25rem !important}.me-xxl-2{margin-right:.5rem !important}.me-xxl-3{margin-right:1rem !important}.me-xxl-4{margin-right:1.5rem !important}.me-xxl-5{margin-right:3rem !important}.me-xxl-auto{margin-right:auto !important}.mb-xxl-0{margin-bottom:0 !important}.mb-xxl-1{margin-bottom:.25rem !important}.mb-xxl-2{margin-bottom:.5rem !important}.mb-xxl-3{margin-bottom:1rem !important}.mb-xxl-4{margin-bottom:1.5rem 
!important}.mb-xxl-5{margin-bottom:3rem !important}.mb-xxl-auto{margin-bottom:auto !important}.ms-xxl-0{margin-left:0 !important}.ms-xxl-1{margin-left:.25rem !important}.ms-xxl-2{margin-left:.5rem !important}.ms-xxl-3{margin-left:1rem !important}.ms-xxl-4{margin-left:1.5rem !important}.ms-xxl-5{margin-left:3rem !important}.ms-xxl-auto{margin-left:auto !important}.p-xxl-0{padding:0 !important}.p-xxl-1{padding:.25rem !important}.p-xxl-2{padding:.5rem !important}.p-xxl-3{padding:1rem !important}.p-xxl-4{padding:1.5rem !important}.p-xxl-5{padding:3rem !important}.px-xxl-0{padding-right:0 !important;padding-left:0 !important}.px-xxl-1{padding-right:.25rem !important;padding-left:.25rem !important}.px-xxl-2{padding-right:.5rem !important;padding-left:.5rem !important}.px-xxl-3{padding-right:1rem !important;padding-left:1rem !important}.px-xxl-4{padding-right:1.5rem !important;padding-left:1.5rem !important}.px-xxl-5{padding-right:3rem !important;padding-left:3rem !important}.py-xxl-0{padding-top:0 !important;padding-bottom:0 !important}.py-xxl-1{padding-top:.25rem !important;padding-bottom:.25rem !important}.py-xxl-2{padding-top:.5rem !important;padding-bottom:.5rem !important}.py-xxl-3{padding-top:1rem !important;padding-bottom:1rem !important}.py-xxl-4{padding-top:1.5rem !important;padding-bottom:1.5rem !important}.py-xxl-5{padding-top:3rem !important;padding-bottom:3rem !important}.pt-xxl-0{padding-top:0 !important}.pt-xxl-1{padding-top:.25rem !important}.pt-xxl-2{padding-top:.5rem !important}.pt-xxl-3{padding-top:1rem !important}.pt-xxl-4{padding-top:1.5rem !important}.pt-xxl-5{padding-top:3rem !important}.pe-xxl-0{padding-right:0 !important}.pe-xxl-1{padding-right:.25rem !important}.pe-xxl-2{padding-right:.5rem !important}.pe-xxl-3{padding-right:1rem !important}.pe-xxl-4{padding-right:1.5rem !important}.pe-xxl-5{padding-right:3rem !important}.pb-xxl-0{padding-bottom:0 !important}.pb-xxl-1{padding-bottom:.25rem !important}.pb-xxl-2{padding-bottom:.5rem !important}.pb-xxl-3{padding-bottom:1rem !important}.pb-xxl-4{padding-bottom:1.5rem !important}.pb-xxl-5{padding-bottom:3rem !important}.ps-xxl-0{padding-left:0 !important}.ps-xxl-1{padding-left:.25rem !important}.ps-xxl-2{padding-left:.5rem !important}.ps-xxl-3{padding-left:1rem !important}.ps-xxl-4{padding-left:1.5rem !important}.ps-xxl-5{padding-left:3rem !important}.gap-xxl-0{gap:0 !important}.gap-xxl-1{gap:.25rem !important}.gap-xxl-2{gap:.5rem !important}.gap-xxl-3{gap:1rem !important}.gap-xxl-4{gap:1.5rem !important}.gap-xxl-5{gap:3rem !important}.row-gap-xxl-0{row-gap:0 !important}.row-gap-xxl-1{row-gap:.25rem !important}.row-gap-xxl-2{row-gap:.5rem !important}.row-gap-xxl-3{row-gap:1rem !important}.row-gap-xxl-4{row-gap:1.5rem !important}.row-gap-xxl-5{row-gap:3rem !important}.column-gap-xxl-0{column-gap:0 !important}.column-gap-xxl-1{column-gap:.25rem !important}.column-gap-xxl-2{column-gap:.5rem !important}.column-gap-xxl-3{column-gap:1rem !important}.column-gap-xxl-4{column-gap:1.5rem !important}.column-gap-xxl-5{column-gap:3rem !important}.text-xxl-start{text-align:left !important}.text-xxl-end{text-align:right !important}.text-xxl-center{text-align:center !important}}.bg-default{color:#fff}.bg-primary{color:#fff}.bg-secondary{color:#fff}.bg-success{color:#fff}.bg-info{color:#fff}.bg-warning{color:#fff}.bg-danger{color:#fff}.bg-light{color:#000}.bg-dark{color:#fff}@media(min-width: 1200px){.fs-1{font-size:2rem !important}.fs-2{font-size:1.65rem !important}.fs-3{font-size:1.45rem !important}}@media 
print{.d-print-inline{display:inline !important}.d-print-inline-block{display:inline-block !important}.d-print-block{display:block !important}.d-print-grid{display:grid !important}.d-print-inline-grid{display:inline-grid !important}.d-print-table{display:table !important}.d-print-table-row{display:table-row !important}.d-print-table-cell{display:table-cell !important}.d-print-flex{display:flex !important}.d-print-inline-flex{display:inline-flex !important}.d-print-none{display:none !important}}:root{--bslib-spacer: 1rem;--bslib-mb-spacer: var(--bslib-spacer, 1rem)}.bslib-mb-spacing{margin-bottom:var(--bslib-mb-spacer)}.bslib-gap-spacing{gap:var(--bslib-mb-spacer)}.bslib-gap-spacing>.bslib-mb-spacing,.bslib-gap-spacing>.form-group,.bslib-gap-spacing>p,.bslib-gap-spacing>pre{margin-bottom:0}.html-fill-container>.html-fill-item.bslib-mb-spacing{margin-bottom:0}.tab-content>.tab-pane.html-fill-container{display:none}.tab-content>.active.html-fill-container{display:flex}.tab-content.html-fill-container{padding:0}.bg-blue{--bslib-color-bg: #2780e3;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-blue{--bslib-color-fg: #2780e3;color:var(--bslib-color-fg)}.bg-indigo{--bslib-color-bg: #6610f2;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-indigo{--bslib-color-fg: #6610f2;color:var(--bslib-color-fg)}.bg-purple{--bslib-color-bg: #613d7c;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-purple{--bslib-color-fg: #613d7c;color:var(--bslib-color-fg)}.bg-pink{--bslib-color-bg: #e83e8c;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-pink{--bslib-color-fg: #e83e8c;color:var(--bslib-color-fg)}.bg-red{--bslib-color-bg: #ff0039;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-red{--bslib-color-fg: #ff0039;color:var(--bslib-color-fg)}.bg-orange{--bslib-color-bg: #f0ad4e;--bslib-color-fg: #000;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-orange{--bslib-color-fg: #f0ad4e;color:var(--bslib-color-fg)}.bg-yellow{--bslib-color-bg: #ff7518;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-yellow{--bslib-color-fg: #ff7518;color:var(--bslib-color-fg)}.bg-green{--bslib-color-bg: #3fb618;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-green{--bslib-color-fg: #3fb618;color:var(--bslib-color-fg)}.bg-teal{--bslib-color-bg: #20c997;--bslib-color-fg: #000;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-teal{--bslib-color-fg: #20c997;color:var(--bslib-color-fg)}.bg-cyan{--bslib-color-bg: #9954bb;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-cyan{--bslib-color-fg: #9954bb;color:var(--bslib-color-fg)}.text-default{--bslib-color-fg: #343a40}.bg-default{--bslib-color-bg: #343a40;--bslib-color-fg: #fff}.text-primary{--bslib-color-fg: #2780e3}.bg-primary{--bslib-color-bg: #2780e3;--bslib-color-fg: #fff}.text-secondary{--bslib-color-fg: #343a40}.bg-secondary{--bslib-color-bg: #343a40;--bslib-color-fg: #fff}.text-success{--bslib-color-fg: #3fb618}.bg-success{--bslib-color-bg: #3fb618;--bslib-color-fg: #fff}.text-info{--bslib-color-fg: #9954bb}.bg-info{--bslib-color-bg: #9954bb;--bslib-color-fg: #fff}.text-warning{--bslib-color-fg: #ff7518}.bg-warning{--bslib-color-bg: #ff7518;--bslib-color-fg: 
#fff}.text-danger{--bslib-color-fg: #ff0039}.bg-danger{--bslib-color-bg: #ff0039;--bslib-color-fg: #fff}.text-light{--bslib-color-fg: #f8f9fa}.bg-light{--bslib-color-bg: #f8f9fa;--bslib-color-fg: #000}.text-dark{--bslib-color-fg: #343a40}.bg-dark{--bslib-color-bg: #343a40;--bslib-color-fg: #fff}.bg-gradient-blue-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #4053e9;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #4053e9;color:#fff}.bg-gradient-blue-purple{--bslib-color-fg: #fff;--bslib-color-bg: #3e65ba;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #3e65ba;color:#fff}.bg-gradient-blue-pink{--bslib-color-fg: #fff;--bslib-color-bg: #7466c0;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #7466c0;color:#fff}.bg-gradient-blue-red{--bslib-color-fg: #fff;--bslib-color-bg: #7d4d9f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #7d4d9f;color:#fff}.bg-gradient-blue-orange{--bslib-color-fg: #fff;--bslib-color-bg: #7792a7;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #7792a7;color:#fff}.bg-gradient-blue-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #7d7c92;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #7d7c92;color:#fff}.bg-gradient-blue-green{--bslib-color-fg: #fff;--bslib-color-bg: #319692;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #319692;color:#fff}.bg-gradient-blue-teal{--bslib-color-fg: #fff;--bslib-color-bg: #249dc5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #249dc5;color:#fff}.bg-gradient-blue-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #556ed3;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #556ed3;color:#fff}.bg-gradient-indigo-blue{--bslib-color-fg: #fff;--bslib-color-bg: #4d3dec;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #4d3dec;color:#fff}.bg-gradient-indigo-purple{--bslib-color-fg: #fff;--bslib-color-bg: #6422c3;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #6422c3;color:#fff}.bg-gradient-indigo-pink{--bslib-color-fg: #fff;--bslib-color-bg: #9a22c9;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #9a22c9;color:#fff}.bg-gradient-indigo-red{--bslib-color-fg: #fff;--bslib-color-bg: #a30aa8;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #a30aa8;color:#fff}.bg-gradient-indigo-orange{--bslib-color-fg: #fff;--bslib-color-bg: #9d4fb0;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #9d4fb0;color:#fff}.bg-gradient-indigo-yellow{--bslib-color-fg: 
#fff;--bslib-color-bg: #a3389b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #a3389b;color:#fff}.bg-gradient-indigo-green{--bslib-color-fg: #fff;--bslib-color-bg: #56529b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #56529b;color:#fff}.bg-gradient-indigo-teal{--bslib-color-fg: #fff;--bslib-color-bg: #4a5ace;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #4a5ace;color:#fff}.bg-gradient-indigo-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #7a2bdc;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #7a2bdc;color:#fff}.bg-gradient-purple-blue{--bslib-color-fg: #fff;--bslib-color-bg: #4a58a5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #4a58a5;color:#fff}.bg-gradient-purple-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #632bab;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #632bab;color:#fff}.bg-gradient-purple-pink{--bslib-color-fg: #fff;--bslib-color-bg: #973d82;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #973d82;color:#fff}.bg-gradient-purple-red{--bslib-color-fg: #fff;--bslib-color-bg: #a02561;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #a02561;color:#fff}.bg-gradient-purple-orange{--bslib-color-fg: #fff;--bslib-color-bg: #9a6a6a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #9a6a6a;color:#fff}.bg-gradient-purple-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #a05354;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #a05354;color:#fff}.bg-gradient-purple-green{--bslib-color-fg: #fff;--bslib-color-bg: #536d54;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #536d54;color:#fff}.bg-gradient-purple-teal{--bslib-color-fg: #fff;--bslib-color-bg: #477587;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #477587;color:#fff}.bg-gradient-purple-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #774695;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #774695;color:#fff}.bg-gradient-pink-blue{--bslib-color-fg: #fff;--bslib-color-bg: #9b58af;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #9b58af;color:#fff}.bg-gradient-pink-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #b42cb5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #b42cb5;color:#fff}.bg-gradient-pink-purple{--bslib-color-fg: #fff;--bslib-color-bg: #b23e86;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c 
var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #b23e86;color:#fff}.bg-gradient-pink-red{--bslib-color-fg: #fff;--bslib-color-bg: #f1256b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #f1256b;color:#fff}.bg-gradient-pink-orange{--bslib-color-fg: #fff;--bslib-color-bg: #eb6a73;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #eb6a73;color:#fff}.bg-gradient-pink-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #f1545e;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #f1545e;color:#fff}.bg-gradient-pink-green{--bslib-color-fg: #fff;--bslib-color-bg: #a46e5e;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #a46e5e;color:#fff}.bg-gradient-pink-teal{--bslib-color-fg: #fff;--bslib-color-bg: #987690;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #987690;color:#fff}.bg-gradient-pink-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #c8479f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #c8479f;color:#fff}.bg-gradient-red-blue{--bslib-color-fg: #fff;--bslib-color-bg: #a9337d;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #a9337d;color:#fff}.bg-gradient-red-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #c20683;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #c20683;color:#fff}.bg-gradient-red-purple{--bslib-color-fg: #fff;--bslib-color-bg: #c01854;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #c01854;color:#fff}.bg-gradient-red-pink{--bslib-color-fg: #fff;--bslib-color-bg: #f6195a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #f6195a;color:#fff}.bg-gradient-red-orange{--bslib-color-fg: #fff;--bslib-color-bg: #f94541;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #f94541;color:#fff}.bg-gradient-red-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #ff2f2c;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #ff2f2c;color:#fff}.bg-gradient-red-green{--bslib-color-fg: #fff;--bslib-color-bg: #b2492c;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #b2492c;color:#fff}.bg-gradient-red-teal{--bslib-color-fg: #fff;--bslib-color-bg: #a6505f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #a6505f;color:#fff}.bg-gradient-red-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #d6226d;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #d6226d;color:#fff}.bg-gradient-orange-blue{--bslib-color-fg: 
#fff;--bslib-color-bg: #a09b8a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #a09b8a;color:#fff}.bg-gradient-orange-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #b96e90;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #b96e90;color:#fff}.bg-gradient-orange-purple{--bslib-color-fg: #fff;--bslib-color-bg: #b78060;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #b78060;color:#fff}.bg-gradient-orange-pink{--bslib-color-fg: #fff;--bslib-color-bg: #ed8167;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #ed8167;color:#fff}.bg-gradient-orange-red{--bslib-color-fg: #fff;--bslib-color-bg: #f66846;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #f66846;color:#fff}.bg-gradient-orange-yellow{--bslib-color-fg: #000;--bslib-color-bg: #f69738;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #f69738;color:#000}.bg-gradient-orange-green{--bslib-color-fg: #000;--bslib-color-bg: #a9b138;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #a9b138;color:#000}.bg-gradient-orange-teal{--bslib-color-fg: #000;--bslib-color-bg: #9db86b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #9db86b;color:#000}.bg-gradient-orange-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #cd897a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #cd897a;color:#fff}.bg-gradient-yellow-blue{--bslib-color-fg: #fff;--bslib-color-bg: #a97969;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #a97969;color:#fff}.bg-gradient-yellow-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #c24d6f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #c24d6f;color:#fff}.bg-gradient-yellow-purple{--bslib-color-fg: #fff;--bslib-color-bg: #c05f40;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #c05f40;color:#fff}.bg-gradient-yellow-pink{--bslib-color-fg: #fff;--bslib-color-bg: #f65f46;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #f65f46;color:#fff}.bg-gradient-yellow-red{--bslib-color-fg: #fff;--bslib-color-bg: #ff4625;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #ff4625;color:#fff}.bg-gradient-yellow-orange{--bslib-color-fg: #000;--bslib-color-bg: #f98b2e;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #f98b2e;color:#000}.bg-gradient-yellow-green{--bslib-color-fg: #fff;--bslib-color-bg: #b28f18;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 
var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #b28f18;color:#fff}.bg-gradient-yellow-teal{--bslib-color-fg: #fff;--bslib-color-bg: #a6974b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #a6974b;color:#fff}.bg-gradient-yellow-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #d66859;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #d66859;color:#fff}.bg-gradient-green-blue{--bslib-color-fg: #fff;--bslib-color-bg: #35a069;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #35a069;color:#fff}.bg-gradient-green-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #4f746f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #4f746f;color:#fff}.bg-gradient-green-purple{--bslib-color-fg: #fff;--bslib-color-bg: #4d8640;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #4d8640;color:#fff}.bg-gradient-green-pink{--bslib-color-fg: #fff;--bslib-color-bg: #838646;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #838646;color:#fff}.bg-gradient-green-red{--bslib-color-fg: #fff;--bslib-color-bg: #8c6d25;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #8c6d25;color:#fff}.bg-gradient-green-orange{--bslib-color-fg: #000;--bslib-color-bg: #86b22e;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #86b22e;color:#000}.bg-gradient-green-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #8c9c18;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #8c9c18;color:#fff}.bg-gradient-green-teal{--bslib-color-fg: #000;--bslib-color-bg: #33be4b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #33be4b;color:#000}.bg-gradient-green-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #638f59;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #638f59;color:#fff}.bg-gradient-teal-blue{--bslib-color-fg: #fff;--bslib-color-bg: #23acb5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #23acb5;color:#fff}.bg-gradient-teal-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #3c7fbb;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #3c7fbb;color:#fff}.bg-gradient-teal-purple{--bslib-color-fg: #fff;--bslib-color-bg: #3a918c;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #3a918c;color:#fff}.bg-gradient-teal-pink{--bslib-color-fg: #fff;--bslib-color-bg: #709193;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) 
#709193;color:#fff}.bg-gradient-teal-red{--bslib-color-fg: #fff;--bslib-color-bg: #797971;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #797971;color:#fff}.bg-gradient-teal-orange{--bslib-color-fg: #000;--bslib-color-bg: #73be7a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #73be7a;color:#000}.bg-gradient-teal-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #79a764;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #79a764;color:#fff}.bg-gradient-teal-green{--bslib-color-fg: #000;--bslib-color-bg: #2cc164;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #2cc164;color:#000}.bg-gradient-teal-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #509aa5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #509aa5;color:#fff}.bg-gradient-cyan-blue{--bslib-color-fg: #fff;--bslib-color-bg: #6b66cb;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #6b66cb;color:#fff}.bg-gradient-cyan-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #8539d1;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #8539d1;color:#fff}.bg-gradient-cyan-purple{--bslib-color-fg: #fff;--bslib-color-bg: #834ba2;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #834ba2;color:#fff}.bg-gradient-cyan-pink{--bslib-color-fg: #fff;--bslib-color-bg: #b94ba8;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #b94ba8;color:#fff}.bg-gradient-cyan-red{--bslib-color-fg: #fff;--bslib-color-bg: #c23287;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #c23287;color:#fff}.bg-gradient-cyan-orange{--bslib-color-fg: #fff;--bslib-color-bg: #bc788f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #bc788f;color:#fff}.bg-gradient-cyan-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #c2617a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #c2617a;color:#fff}.bg-gradient-cyan-green{--bslib-color-fg: #fff;--bslib-color-bg: #757b7a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #757b7a;color:#fff}.bg-gradient-cyan-teal{--bslib-color-fg: #fff;--bslib-color-bg: #6983ad;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #6983ad;color:#fff}.tab-content>.tab-pane.html-fill-container{display:none}.tab-content>.active.html-fill-container{display:flex}.tab-content.html-fill-container{padding:0}:root{--bslib-spacer: 1rem;--bslib-mb-spacer: var(--bslib-spacer, 
1rem)}.bslib-mb-spacing{margin-bottom:var(--bslib-mb-spacer)}.bslib-gap-spacing{gap:var(--bslib-mb-spacer)}.bslib-gap-spacing>.bslib-mb-spacing,.bslib-gap-spacing>.form-group,.bslib-gap-spacing>p,.bslib-gap-spacing>pre{margin-bottom:0}.html-fill-container>.html-fill-item.bslib-mb-spacing{margin-bottom:0}.bg-blue{--bslib-color-bg: #2780e3;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-blue{--bslib-color-fg: #2780e3;color:var(--bslib-color-fg)}.bg-indigo{--bslib-color-bg: #6610f2;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-indigo{--bslib-color-fg: #6610f2;color:var(--bslib-color-fg)}.bg-purple{--bslib-color-bg: #613d7c;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-purple{--bslib-color-fg: #613d7c;color:var(--bslib-color-fg)}.bg-pink{--bslib-color-bg: #e83e8c;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-pink{--bslib-color-fg: #e83e8c;color:var(--bslib-color-fg)}.bg-red{--bslib-color-bg: #ff0039;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-red{--bslib-color-fg: #ff0039;color:var(--bslib-color-fg)}.bg-orange{--bslib-color-bg: #f0ad4e;--bslib-color-fg: #000;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-orange{--bslib-color-fg: #f0ad4e;color:var(--bslib-color-fg)}.bg-yellow{--bslib-color-bg: #ff7518;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-yellow{--bslib-color-fg: #ff7518;color:var(--bslib-color-fg)}.bg-green{--bslib-color-bg: #3fb618;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-green{--bslib-color-fg: #3fb618;color:var(--bslib-color-fg)}.bg-teal{--bslib-color-bg: #20c997;--bslib-color-fg: #000;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-teal{--bslib-color-fg: #20c997;color:var(--bslib-color-fg)}.bg-cyan{--bslib-color-bg: #9954bb;--bslib-color-fg: #fff;background-color:var(--bslib-color-bg);color:var(--bslib-color-fg)}.text-cyan{--bslib-color-fg: #9954bb;color:var(--bslib-color-fg)}.text-default{--bslib-color-fg: #343a40}.bg-default{--bslib-color-bg: #343a40;--bslib-color-fg: #fff}.text-primary{--bslib-color-fg: #2780e3}.bg-primary{--bslib-color-bg: #2780e3;--bslib-color-fg: #fff}.text-secondary{--bslib-color-fg: #343a40}.bg-secondary{--bslib-color-bg: #343a40;--bslib-color-fg: #fff}.text-success{--bslib-color-fg: #3fb618}.bg-success{--bslib-color-bg: #3fb618;--bslib-color-fg: #fff}.text-info{--bslib-color-fg: #9954bb}.bg-info{--bslib-color-bg: #9954bb;--bslib-color-fg: #fff}.text-warning{--bslib-color-fg: #ff7518}.bg-warning{--bslib-color-bg: #ff7518;--bslib-color-fg: #fff}.text-danger{--bslib-color-fg: #ff0039}.bg-danger{--bslib-color-bg: #ff0039;--bslib-color-fg: #fff}.text-light{--bslib-color-fg: #f8f9fa}.bg-light{--bslib-color-bg: #f8f9fa;--bslib-color-fg: #000}.text-dark{--bslib-color-fg: #343a40}.bg-dark{--bslib-color-bg: #343a40;--bslib-color-fg: #fff}.bg-gradient-blue-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #4053e9;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #4053e9;color:#fff}.bg-gradient-blue-purple{--bslib-color-fg: #fff;--bslib-color-bg: #3e65ba;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) 
#3e65ba;color:#fff}.bg-gradient-blue-pink{--bslib-color-fg: #fff;--bslib-color-bg: #7466c0;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #7466c0;color:#fff}.bg-gradient-blue-red{--bslib-color-fg: #fff;--bslib-color-bg: #7d4d9f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #7d4d9f;color:#fff}.bg-gradient-blue-orange{--bslib-color-fg: #fff;--bslib-color-bg: #7792a7;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #7792a7;color:#fff}.bg-gradient-blue-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #7d7c92;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #7d7c92;color:#fff}.bg-gradient-blue-green{--bslib-color-fg: #fff;--bslib-color-bg: #319692;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #319692;color:#fff}.bg-gradient-blue-teal{--bslib-color-fg: #fff;--bslib-color-bg: #249dc5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #249dc5;color:#fff}.bg-gradient-blue-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #556ed3;background:linear-gradient(var(--bg-gradient-deg, 140deg), #2780e3 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #556ed3;color:#fff}.bg-gradient-indigo-blue{--bslib-color-fg: #fff;--bslib-color-bg: #4d3dec;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #4d3dec;color:#fff}.bg-gradient-indigo-purple{--bslib-color-fg: #fff;--bslib-color-bg: #6422c3;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #6422c3;color:#fff}.bg-gradient-indigo-pink{--bslib-color-fg: #fff;--bslib-color-bg: #9a22c9;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #9a22c9;color:#fff}.bg-gradient-indigo-red{--bslib-color-fg: #fff;--bslib-color-bg: #a30aa8;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #a30aa8;color:#fff}.bg-gradient-indigo-orange{--bslib-color-fg: #fff;--bslib-color-bg: #9d4fb0;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #9d4fb0;color:#fff}.bg-gradient-indigo-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #a3389b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #a3389b;color:#fff}.bg-gradient-indigo-green{--bslib-color-fg: #fff;--bslib-color-bg: #56529b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #56529b;color:#fff}.bg-gradient-indigo-teal{--bslib-color-fg: #fff;--bslib-color-bg: #4a5ace;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #4a5ace;color:#fff}.bg-gradient-indigo-cyan{--bslib-color-fg: #fff;--bslib-color-bg: 
#7a2bdc;background:linear-gradient(var(--bg-gradient-deg, 140deg), #6610f2 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #7a2bdc;color:#fff}.bg-gradient-purple-blue{--bslib-color-fg: #fff;--bslib-color-bg: #4a58a5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #4a58a5;color:#fff}.bg-gradient-purple-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #632bab;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #632bab;color:#fff}.bg-gradient-purple-pink{--bslib-color-fg: #fff;--bslib-color-bg: #973d82;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #973d82;color:#fff}.bg-gradient-purple-red{--bslib-color-fg: #fff;--bslib-color-bg: #a02561;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #a02561;color:#fff}.bg-gradient-purple-orange{--bslib-color-fg: #fff;--bslib-color-bg: #9a6a6a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #9a6a6a;color:#fff}.bg-gradient-purple-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #a05354;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #a05354;color:#fff}.bg-gradient-purple-green{--bslib-color-fg: #fff;--bslib-color-bg: #536d54;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #536d54;color:#fff}.bg-gradient-purple-teal{--bslib-color-fg: #fff;--bslib-color-bg: #477587;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #477587;color:#fff}.bg-gradient-purple-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #774695;background:linear-gradient(var(--bg-gradient-deg, 140deg), #613d7c var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #774695;color:#fff}.bg-gradient-pink-blue{--bslib-color-fg: #fff;--bslib-color-bg: #9b58af;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #9b58af;color:#fff}.bg-gradient-pink-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #b42cb5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #b42cb5;color:#fff}.bg-gradient-pink-purple{--bslib-color-fg: #fff;--bslib-color-bg: #b23e86;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #b23e86;color:#fff}.bg-gradient-pink-red{--bslib-color-fg: #fff;--bslib-color-bg: #f1256b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #f1256b;color:#fff}.bg-gradient-pink-orange{--bslib-color-fg: #fff;--bslib-color-bg: #eb6a73;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #eb6a73;color:#fff}.bg-gradient-pink-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #f1545e;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #ff7518 
var(--bg-gradient-end, 180%)) #f1545e;color:#fff}.bg-gradient-pink-green{--bslib-color-fg: #fff;--bslib-color-bg: #a46e5e;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #a46e5e;color:#fff}.bg-gradient-pink-teal{--bslib-color-fg: #fff;--bslib-color-bg: #987690;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #987690;color:#fff}.bg-gradient-pink-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #c8479f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #e83e8c var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #c8479f;color:#fff}.bg-gradient-red-blue{--bslib-color-fg: #fff;--bslib-color-bg: #a9337d;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #a9337d;color:#fff}.bg-gradient-red-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #c20683;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #c20683;color:#fff}.bg-gradient-red-purple{--bslib-color-fg: #fff;--bslib-color-bg: #c01854;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #c01854;color:#fff}.bg-gradient-red-pink{--bslib-color-fg: #fff;--bslib-color-bg: #f6195a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #f6195a;color:#fff}.bg-gradient-red-orange{--bslib-color-fg: #fff;--bslib-color-bg: #f94541;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #f94541;color:#fff}.bg-gradient-red-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #ff2f2c;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #ff2f2c;color:#fff}.bg-gradient-red-green{--bslib-color-fg: #fff;--bslib-color-bg: #b2492c;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #b2492c;color:#fff}.bg-gradient-red-teal{--bslib-color-fg: #fff;--bslib-color-bg: #a6505f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #a6505f;color:#fff}.bg-gradient-red-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #d6226d;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff0039 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #d6226d;color:#fff}.bg-gradient-orange-blue{--bslib-color-fg: #fff;--bslib-color-bg: #a09b8a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #a09b8a;color:#fff}.bg-gradient-orange-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #b96e90;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #b96e90;color:#fff}.bg-gradient-orange-purple{--bslib-color-fg: #fff;--bslib-color-bg: #b78060;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #b78060;color:#fff}.bg-gradient-orange-pink{--bslib-color-fg: #fff;--bslib-color-bg: 
#ed8167;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #ed8167;color:#fff}.bg-gradient-orange-red{--bslib-color-fg: #fff;--bslib-color-bg: #f66846;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #f66846;color:#fff}.bg-gradient-orange-yellow{--bslib-color-fg: #000;--bslib-color-bg: #f69738;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #f69738;color:#000}.bg-gradient-orange-green{--bslib-color-fg: #000;--bslib-color-bg: #a9b138;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #a9b138;color:#000}.bg-gradient-orange-teal{--bslib-color-fg: #000;--bslib-color-bg: #9db86b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #9db86b;color:#000}.bg-gradient-orange-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #cd897a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #f0ad4e var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #cd897a;color:#fff}.bg-gradient-yellow-blue{--bslib-color-fg: #fff;--bslib-color-bg: #a97969;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #a97969;color:#fff}.bg-gradient-yellow-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #c24d6f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #c24d6f;color:#fff}.bg-gradient-yellow-purple{--bslib-color-fg: #fff;--bslib-color-bg: #c05f40;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #c05f40;color:#fff}.bg-gradient-yellow-pink{--bslib-color-fg: #fff;--bslib-color-bg: #f65f46;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #f65f46;color:#fff}.bg-gradient-yellow-red{--bslib-color-fg: #fff;--bslib-color-bg: #ff4625;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #ff4625;color:#fff}.bg-gradient-yellow-orange{--bslib-color-fg: #000;--bslib-color-bg: #f98b2e;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #f98b2e;color:#000}.bg-gradient-yellow-green{--bslib-color-fg: #fff;--bslib-color-bg: #b28f18;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #b28f18;color:#fff}.bg-gradient-yellow-teal{--bslib-color-fg: #fff;--bslib-color-bg: #a6974b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #a6974b;color:#fff}.bg-gradient-yellow-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #d66859;background:linear-gradient(var(--bg-gradient-deg, 140deg), #ff7518 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #d66859;color:#fff}.bg-gradient-green-blue{--bslib-color-fg: #fff;--bslib-color-bg: #35a069;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), 
#2780e3 var(--bg-gradient-end, 180%)) #35a069;color:#fff}.bg-gradient-green-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #4f746f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #4f746f;color:#fff}.bg-gradient-green-purple{--bslib-color-fg: #fff;--bslib-color-bg: #4d8640;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #4d8640;color:#fff}.bg-gradient-green-pink{--bslib-color-fg: #fff;--bslib-color-bg: #838646;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #838646;color:#fff}.bg-gradient-green-red{--bslib-color-fg: #fff;--bslib-color-bg: #8c6d25;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #8c6d25;color:#fff}.bg-gradient-green-orange{--bslib-color-fg: #000;--bslib-color-bg: #86b22e;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #86b22e;color:#000}.bg-gradient-green-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #8c9c18;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #8c9c18;color:#fff}.bg-gradient-green-teal{--bslib-color-fg: #000;--bslib-color-bg: #33be4b;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #33be4b;color:#000}.bg-gradient-green-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #638f59;background:linear-gradient(var(--bg-gradient-deg, 140deg), #3fb618 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #638f59;color:#fff}.bg-gradient-teal-blue{--bslib-color-fg: #fff;--bslib-color-bg: #23acb5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #23acb5;color:#fff}.bg-gradient-teal-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #3c7fbb;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #3c7fbb;color:#fff}.bg-gradient-teal-purple{--bslib-color-fg: #fff;--bslib-color-bg: #3a918c;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #3a918c;color:#fff}.bg-gradient-teal-pink{--bslib-color-fg: #fff;--bslib-color-bg: #709193;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #709193;color:#fff}.bg-gradient-teal-red{--bslib-color-fg: #fff;--bslib-color-bg: #797971;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #797971;color:#fff}.bg-gradient-teal-orange{--bslib-color-fg: #000;--bslib-color-bg: #73be7a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #73be7a;color:#000}.bg-gradient-teal-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #79a764;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #79a764;color:#fff}.bg-gradient-teal-green{--bslib-color-fg: #000;--bslib-color-bg: 
#2cc164;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #2cc164;color:#000}.bg-gradient-teal-cyan{--bslib-color-fg: #fff;--bslib-color-bg: #509aa5;background:linear-gradient(var(--bg-gradient-deg, 140deg), #20c997 var(--bg-gradient-start, 36%), #9954bb var(--bg-gradient-end, 180%)) #509aa5;color:#fff}.bg-gradient-cyan-blue{--bslib-color-fg: #fff;--bslib-color-bg: #6b66cb;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #2780e3 var(--bg-gradient-end, 180%)) #6b66cb;color:#fff}.bg-gradient-cyan-indigo{--bslib-color-fg: #fff;--bslib-color-bg: #8539d1;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #6610f2 var(--bg-gradient-end, 180%)) #8539d1;color:#fff}.bg-gradient-cyan-purple{--bslib-color-fg: #fff;--bslib-color-bg: #834ba2;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #613d7c var(--bg-gradient-end, 180%)) #834ba2;color:#fff}.bg-gradient-cyan-pink{--bslib-color-fg: #fff;--bslib-color-bg: #b94ba8;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #e83e8c var(--bg-gradient-end, 180%)) #b94ba8;color:#fff}.bg-gradient-cyan-red{--bslib-color-fg: #fff;--bslib-color-bg: #c23287;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #ff0039 var(--bg-gradient-end, 180%)) #c23287;color:#fff}.bg-gradient-cyan-orange{--bslib-color-fg: #fff;--bslib-color-bg: #bc788f;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #f0ad4e var(--bg-gradient-end, 180%)) #bc788f;color:#fff}.bg-gradient-cyan-yellow{--bslib-color-fg: #fff;--bslib-color-bg: #c2617a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #ff7518 var(--bg-gradient-end, 180%)) #c2617a;color:#fff}.bg-gradient-cyan-green{--bslib-color-fg: #fff;--bslib-color-bg: #757b7a;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #3fb618 var(--bg-gradient-end, 180%)) #757b7a;color:#fff}.bg-gradient-cyan-teal{--bslib-color-fg: #fff;--bslib-color-bg: #6983ad;background:linear-gradient(var(--bg-gradient-deg, 140deg), #9954bb var(--bg-gradient-start, 36%), #20c997 var(--bg-gradient-end, 180%)) #6983ad;color:#fff}.bslib-grid{display:grid !important;gap:var(--bslib-spacer, 1rem);height:var(--bslib-grid-height)}.bslib-grid.grid{grid-template-columns:repeat(var(--bs-columns, 12), minmax(0, 1fr));grid-template-rows:unset;grid-auto-rows:var(--bslib-grid--row-heights);--bslib-grid--row-heights--xs: unset;--bslib-grid--row-heights--sm: unset;--bslib-grid--row-heights--md: unset;--bslib-grid--row-heights--lg: unset;--bslib-grid--row-heights--xl: unset;--bslib-grid--row-heights--xxl: unset}.bslib-grid.grid.bslib-grid--row-heights--xs{--bslib-grid--row-heights: var(--bslib-grid--row-heights--xs)}@media(min-width: 576px){.bslib-grid.grid.bslib-grid--row-heights--sm{--bslib-grid--row-heights: var(--bslib-grid--row-heights--sm)}}@media(min-width: 768px){.bslib-grid.grid.bslib-grid--row-heights--md{--bslib-grid--row-heights: var(--bslib-grid--row-heights--md)}}@media(min-width: 992px){.bslib-grid.grid.bslib-grid--row-heights--lg{--bslib-grid--row-heights: var(--bslib-grid--row-heights--lg)}}@media(min-width: 1200px){.bslib-grid.grid.bslib-grid--row-heights--xl{--bslib-grid--row-heights: 
var(--bslib-grid--row-heights--xl)}}@media(min-width: 1400px){.bslib-grid.grid.bslib-grid--row-heights--xxl{--bslib-grid--row-heights: var(--bslib-grid--row-heights--xxl)}}.bslib-grid>*>.shiny-input-container{width:100%}.bslib-grid-item{grid-column:auto/span 1}@media(max-width: 767.98px){.bslib-grid-item{grid-column:1/-1}}@media(max-width: 575.98px){.bslib-grid{grid-template-columns:1fr !important;height:var(--bslib-grid-height-mobile)}.bslib-grid.grid{height:unset !important;grid-auto-rows:var(--bslib-grid--row-heights--xs, auto)}}:root{--bslib-page-sidebar-title-bg: #2780e3;--bslib-page-sidebar-title-color: #fff}.bslib-page-title{background-color:var(--bslib-page-sidebar-title-bg);color:var(--bslib-page-sidebar-title-color);font-size:1.25rem;font-weight:300;padding:var(--bslib-spacer, 1rem);padding-left:1.5rem;margin-bottom:0;border-bottom:1px solid #dee2e6}html{height:100%}.bslib-page-fill{width:100%;height:100%;margin:0;padding:var(--bslib-spacer, 1rem);gap:var(--bslib-spacer, 1rem)}@media(max-width: 575.98px){.bslib-page-fill{height:var(--bslib-page-fill-mobile-height, auto)}}@media(min-width: 576px){.nav:not(.nav-hidden){display:flex !important;display:-webkit-flex !important}.nav:not(.nav-hidden):not(.nav-stacked):not(.flex-column){float:none !important}.nav:not(.nav-hidden):not(.nav-stacked):not(.flex-column)>.bslib-nav-spacer{margin-left:auto !important}.nav:not(.nav-hidden):not(.nav-stacked):not(.flex-column)>.form-inline{margin-top:auto;margin-bottom:auto}.nav:not(.nav-hidden).nav-stacked{flex-direction:column;-webkit-flex-direction:column;height:100%}.nav:not(.nav-hidden).nav-stacked>.bslib-nav-spacer{margin-top:auto !important}}.accordion .accordion-header{font-size:calc(1.29rem + 0.48vw);margin-top:0;margin-bottom:.5rem;font-weight:400;line-height:1.2;color:var(--bs-heading-color);margin-bottom:0}@media(min-width: 1200px){.accordion .accordion-header{font-size:1.65rem}}.accordion .accordion-icon:not(:empty){margin-right:.75rem;display:flex}.accordion .accordion-button:not(.collapsed){box-shadow:none}.accordion .accordion-button:not(.collapsed):focus{box-shadow:var(--bs-accordion-btn-focus-box-shadow)}.bslib-card{overflow:auto}.bslib-card .card-body+.card-body{padding-top:0}.bslib-card .card-body{overflow:auto}.bslib-card .card-body p{margin-top:0}.bslib-card .card-body p:last-child{margin-bottom:0}.bslib-card .card-body{max-height:var(--bslib-card-body-max-height, none)}.bslib-card[data-full-screen=true]>.card-body{max-height:var(--bslib-card-body-max-height-full-screen, none)}.bslib-card .card-header .form-group{margin-bottom:0}.bslib-card .card-header .selectize-control{margin-bottom:0}.bslib-card .card-header .selectize-control .item{margin-right:1.15rem}.bslib-card .card-footer{margin-top:auto}.bslib-card .bslib-navs-card-title{display:flex;flex-wrap:wrap;justify-content:space-between;align-items:center}.bslib-card .bslib-navs-card-title .nav{margin-left:auto}.bslib-card .bslib-sidebar-layout:not([data-bslib-sidebar-border=true]){border:none}.bslib-card .bslib-sidebar-layout:not([data-bslib-sidebar-border-radius=true]){border-top-left-radius:0;border-top-right-radius:0}[data-full-screen=true]{position:fixed;inset:3.5rem 1rem 1rem;height:auto !important;max-height:none !important;width:auto !important;z-index:1070}.bslib-full-screen-enter{display:none;position:absolute;bottom:var(--bslib-full-screen-enter-bottom, 0.2rem);right:var(--bslib-full-screen-enter-right, 0);top:var(--bslib-full-screen-enter-top);left:var(--bslib-full-screen-enter-left);color:var(--bslib-color-fg, 
var(--bs-card-color));background-color:var(--bslib-color-bg, var(--bs-card-bg, var(--bs-body-bg)));border:var(--bs-card-border-width) solid var(--bslib-color-fg, var(--bs-card-border-color));box-shadow:0 2px 4px rgba(0,0,0,.15);margin:.2rem .4rem;padding:.55rem !important;font-size:.8rem;cursor:pointer;opacity:.7;z-index:1070}.bslib-full-screen-enter:hover{opacity:1}.card[data-full-screen=false]:hover>*>.bslib-full-screen-enter{display:block}.bslib-has-full-screen .card:hover>*>.bslib-full-screen-enter{display:none}@media(max-width: 575.98px){.bslib-full-screen-enter{display:none !important}}.bslib-full-screen-exit{position:relative;top:1.35rem;font-size:.9rem;cursor:pointer;text-decoration:none;display:flex;float:right;margin-right:2.15rem;align-items:center;color:rgba(var(--bs-body-bg-rgb), 0.8)}.bslib-full-screen-exit:hover{color:rgba(var(--bs-body-bg-rgb), 1)}.bslib-full-screen-exit svg{margin-left:.5rem;font-size:1.5rem}#bslib-full-screen-overlay{position:fixed;inset:0;background-color:rgba(var(--bs-body-color-rgb), 0.6);backdrop-filter:blur(2px);-webkit-backdrop-filter:blur(2px);z-index:1069;animation:bslib-full-screen-overlay-enter 400ms cubic-bezier(0.6, 0.02, 0.65, 1) forwards}@keyframes bslib-full-screen-overlay-enter{0%{opacity:0}100%{opacity:1}}.navbar+.container-fluid:has(>.tab-content>.tab-pane.active.html-fill-container),.navbar+.container-sm:has(>.tab-content>.tab-pane.active.html-fill-container),.navbar+.container-md:has(>.tab-content>.tab-pane.active.html-fill-container),.navbar+.container-lg:has(>.tab-content>.tab-pane.active.html-fill-container),.navbar+.container-xl:has(>.tab-content>.tab-pane.active.html-fill-container),.navbar+.container-xxl:has(>.tab-content>.tab-pane.active.html-fill-container){padding-left:0;padding-right:0}.navbar+.container-fluid>.tab-content>.tab-pane.active.html-fill-container,.navbar+.container-sm>.tab-content>.tab-pane.active.html-fill-container,.navbar+.container-md>.tab-content>.tab-pane.active.html-fill-container,.navbar+.container-lg>.tab-content>.tab-pane.active.html-fill-container,.navbar+.container-xl>.tab-content>.tab-pane.active.html-fill-container,.navbar+.container-xxl>.tab-content>.tab-pane.active.html-fill-container{padding:var(--bslib-spacer, 1rem);gap:var(--bslib-spacer, 
1rem)}.navbar+.container-fluid>.tab-content>.tab-pane.active.html-fill-container:has(>.bslib-sidebar-layout:only-child),.navbar+.container-sm>.tab-content>.tab-pane.active.html-fill-container:has(>.bslib-sidebar-layout:only-child),.navbar+.container-md>.tab-content>.tab-pane.active.html-fill-container:has(>.bslib-sidebar-layout:only-child),.navbar+.container-lg>.tab-content>.tab-pane.active.html-fill-container:has(>.bslib-sidebar-layout:only-child),.navbar+.container-xl>.tab-content>.tab-pane.active.html-fill-container:has(>.bslib-sidebar-layout:only-child),.navbar+.container-xxl>.tab-content>.tab-pane.active.html-fill-container:has(>.bslib-sidebar-layout:only-child){padding:0}.navbar+.container-fluid>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border=true]),.navbar+.container-sm>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border=true]),.navbar+.container-md>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border=true]),.navbar+.container-lg>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border=true]),.navbar+.container-xl>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border=true]),.navbar+.container-xxl>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border=true]){border-left:none;border-right:none;border-bottom:none}.navbar+.container-fluid>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border-radius=true]),.navbar+.container-sm>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border-radius=true]),.navbar+.container-md>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border-radius=true]),.navbar+.container-lg>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border-radius=true]),.navbar+.container-xl>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border-radius=true]),.navbar+.container-xxl>.tab-content>.tab-pane.active.html-fill-container>.bslib-sidebar-layout:only-child:not([data-bslib-sidebar-border-radius=true]){border-radius:0}.navbar+div>.bslib-sidebar-layout{border-top:var(--bslib-sidebar-border)}:root{--bslib-value-box-shadow: none;--bslib-value-box-border-width-auto-yes: var(--bslib-value-box-border-width-baseline);--bslib-value-box-border-width-auto-no: 0;--bslib-value-box-border-width-baseline: 1px}.bslib-value-box{border-width:var(--bslib-value-box-border-width-auto-no, var(--bslib-value-box-border-width-baseline));container-name:bslib-value-box;container-type:inline-size}.bslib-value-box.card{box-shadow:var(--bslib-value-box-shadow)}.bslib-value-box.border-auto{border-width:var(--bslib-value-box-border-width-auto-yes, var(--bslib-value-box-border-width-baseline))}.bslib-value-box.default{--bslib-value-box-bg-default: var(--bs-card-bg, #fff);--bslib-value-box-border-color-default: var(--bs-card-border-color, rgba(0, 0, 0, 0.175));color:var(--bslib-value-box-color);background-color:var(--bslib-value-box-bg, var(--bslib-value-box-bg-default));border-color:var(--bslib-value-box-border-color, 
var(--bslib-value-box-border-color-default))}.bslib-value-box .value-box-grid{display:grid;grid-template-areas:"left right";align-items:center;overflow:hidden}.bslib-value-box .value-box-showcase{height:100%;max-height:var(---bslib-value-box-showcase-max-h, 100%)}.bslib-value-box .value-box-showcase,.bslib-value-box .value-box-showcase>.html-fill-item{width:100%}.bslib-value-box[data-full-screen=true] .value-box-showcase{max-height:var(---bslib-value-box-showcase-max-h-fs, 100%)}@media screen and (min-width: 575.98px){@container bslib-value-box (max-width: 300px){.bslib-value-box:not(.showcase-bottom) .value-box-grid{grid-template-columns:1fr !important;grid-template-rows:auto auto;grid-template-areas:"top" "bottom"}.bslib-value-box:not(.showcase-bottom) .value-box-grid .value-box-showcase{grid-area:top !important}.bslib-value-box:not(.showcase-bottom) .value-box-grid .value-box-area{grid-area:bottom !important;justify-content:end}}}.bslib-value-box .value-box-area{justify-content:center;padding:1.5rem 1rem;font-size:.9rem;font-weight:500}.bslib-value-box .value-box-area *{margin-bottom:0;margin-top:0}.bslib-value-box .value-box-title{font-size:1rem;margin-top:0;margin-bottom:.5rem;font-weight:400;line-height:1.2}.bslib-value-box .value-box-title:empty::after{content:" "}.bslib-value-box .value-box-value{font-size:calc(1.29rem + 0.48vw);margin-top:0;margin-bottom:.5rem;font-weight:400;line-height:1.2}@media(min-width: 1200px){.bslib-value-box .value-box-value{font-size:1.65rem}}.bslib-value-box .value-box-value:empty::after{content:" "}.bslib-value-box .value-box-showcase{align-items:center;justify-content:center;margin-top:auto;margin-bottom:auto;padding:1rem}.bslib-value-box .value-box-showcase .bi,.bslib-value-box .value-box-showcase .fa,.bslib-value-box .value-box-showcase .fab,.bslib-value-box .value-box-showcase .fas,.bslib-value-box .value-box-showcase .far{opacity:.85;min-width:50px;max-width:125%}.bslib-value-box .value-box-showcase .bi,.bslib-value-box .value-box-showcase .fa,.bslib-value-box .value-box-showcase .fab,.bslib-value-box .value-box-showcase .fas,.bslib-value-box .value-box-showcase .far{font-size:4rem}.bslib-value-box.showcase-top-right .value-box-grid{grid-template-columns:1fr var(---bslib-value-box-showcase-w, 50%)}.bslib-value-box.showcase-top-right .value-box-grid .value-box-showcase{grid-area:right;margin-left:auto;align-self:start;align-items:end;padding-left:0;padding-bottom:0}.bslib-value-box.showcase-top-right .value-box-grid .value-box-area{grid-area:left;align-self:end}.bslib-value-box.showcase-top-right[data-full-screen=true] .value-box-grid{grid-template-columns:auto var(---bslib-value-box-showcase-w-fs, 1fr)}.bslib-value-box.showcase-top-right[data-full-screen=true] .value-box-grid>div{align-self:center}.bslib-value-box.showcase-top-right:not([data-full-screen=true]) .value-box-showcase{margin-top:0}@container bslib-value-box (max-width: 300px){.bslib-value-box.showcase-top-right:not([data-full-screen=true]) .value-box-grid .value-box-showcase{padding-left:1rem}}.bslib-value-box.showcase-left-center .value-box-grid{grid-template-columns:var(---bslib-value-box-showcase-w, 30%) auto}.bslib-value-box.showcase-left-center[data-full-screen=true] .value-box-grid{grid-template-columns:var(---bslib-value-box-showcase-w-fs, 1fr) auto}.bslib-value-box.showcase-left-center:not([data-fill-screen=true]) .value-box-grid .value-box-showcase{grid-area:left}.bslib-value-box.showcase-left-center:not([data-fill-screen=true]) .value-box-grid 
.value-box-area{grid-area:right}.bslib-value-box.showcase-bottom .value-box-grid{grid-template-columns:1fr;grid-template-rows:1fr var(---bslib-value-box-showcase-h, auto);grid-template-areas:"top" "bottom";overflow:hidden}.bslib-value-box.showcase-bottom .value-box-grid .value-box-showcase{grid-area:bottom;padding:0;margin:0}.bslib-value-box.showcase-bottom .value-box-grid .value-box-area{grid-area:top}.bslib-value-box.showcase-bottom[data-full-screen=true] .value-box-grid{grid-template-rows:1fr var(---bslib-value-box-showcase-h-fs, 2fr)}.bslib-value-box.showcase-bottom[data-full-screen=true] .value-box-grid .value-box-showcase{padding:1rem}[data-bs-theme=dark] .bslib-value-box{--bslib-value-box-shadow: 0 0.5rem 1rem rgb(0 0 0 / 50%)}.bslib-sidebar-layout{--bslib-sidebar-transition-duration: 500ms;--bslib-sidebar-transition-easing-x: cubic-bezier(0.8, 0.78, 0.22, 1.07);--bslib-sidebar-border: var(--bs-card-border-width, 1px) solid var(--bs-card-border-color, rgba(0, 0, 0, 0.175));--bslib-sidebar-border-radius: var(--bs-border-radius);--bslib-sidebar-vert-border: var(--bs-card-border-width, 1px) solid var(--bs-card-border-color, rgba(0, 0, 0, 0.175));--bslib-sidebar-bg: rgba(var(--bs-emphasis-color-rgb, 0, 0, 0), 0.05);--bslib-sidebar-fg: var(--bs-emphasis-color, black);--bslib-sidebar-main-fg: var(--bs-card-color, var(--bs-body-color));--bslib-sidebar-main-bg: var(--bs-card-bg, var(--bs-body-bg));--bslib-sidebar-toggle-bg: rgba(var(--bs-emphasis-color-rgb, 0, 0, 0), 0.1);--bslib-sidebar-padding: calc(var(--bslib-spacer) * 1.5);--bslib-sidebar-icon-size: var(--bslib-spacer, 1rem);--bslib-sidebar-icon-button-size: calc(var(--bslib-sidebar-icon-size, 1rem) * 2);--bslib-sidebar-padding-icon: calc(var(--bslib-sidebar-icon-button-size, 2rem) * 1.5);--bslib-collapse-toggle-border-radius: var(--bs-border-radius, 0.25rem);--bslib-collapse-toggle-transform: 0deg;--bslib-sidebar-toggle-transition-easing: cubic-bezier(1, 0, 0, 1);--bslib-collapse-toggle-right-transform: 180deg;--bslib-sidebar-column-main: minmax(0, 1fr);display:grid !important;grid-template-columns:min(100% - var(--bslib-sidebar-icon-size),var(--bslib-sidebar-width, 250px)) var(--bslib-sidebar-column-main);position:relative;transition:grid-template-columns ease-in-out var(--bslib-sidebar-transition-duration);border:var(--bslib-sidebar-border);border-radius:var(--bslib-sidebar-border-radius)}@media(prefers-reduced-motion: reduce){.bslib-sidebar-layout{transition:none}}.bslib-sidebar-layout[data-bslib-sidebar-border=false]{border:none}.bslib-sidebar-layout[data-bslib-sidebar-border-radius=false]{border-radius:initial}.bslib-sidebar-layout>.main,.bslib-sidebar-layout>.sidebar{grid-row:1/2;border-radius:inherit;overflow:auto}.bslib-sidebar-layout>.main{grid-column:2/3;border-top-left-radius:0;border-bottom-left-radius:0;padding:var(--bslib-sidebar-padding);transition:padding var(--bslib-sidebar-transition-easing-x) var(--bslib-sidebar-transition-duration);color:var(--bslib-sidebar-main-fg);background-color:var(--bslib-sidebar-main-bg)}.bslib-sidebar-layout>.sidebar{grid-column:1/2;width:100%;height:100%;border-right:var(--bslib-sidebar-vert-border);border-top-right-radius:0;border-bottom-right-radius:0;color:var(--bslib-sidebar-fg);background-color:var(--bslib-sidebar-bg);backdrop-filter:blur(5px)}.bslib-sidebar-layout>.sidebar>.sidebar-content{display:flex;flex-direction:column;gap:var(--bslib-spacer, 
1rem);padding:var(--bslib-sidebar-padding);padding-top:var(--bslib-sidebar-padding-icon)}.bslib-sidebar-layout>.sidebar>.sidebar-content>:last-child:not(.sidebar-title){margin-bottom:0}.bslib-sidebar-layout>.sidebar>.sidebar-content>.accordion{margin-left:calc(-1*var(--bslib-sidebar-padding));margin-right:calc(-1*var(--bslib-sidebar-padding))}.bslib-sidebar-layout>.sidebar>.sidebar-content>.accordion:last-child{margin-bottom:calc(-1*var(--bslib-sidebar-padding))}.bslib-sidebar-layout>.sidebar>.sidebar-content>.accordion:not(:last-child){margin-bottom:1rem}.bslib-sidebar-layout>.sidebar>.sidebar-content>.accordion .accordion-body{display:flex;flex-direction:column}.bslib-sidebar-layout>.sidebar>.sidebar-content>.accordion:not(:first-child) .accordion-item:first-child{border-top:var(--bs-accordion-border-width) solid var(--bs-accordion-border-color)}.bslib-sidebar-layout>.sidebar>.sidebar-content>.accordion:not(:last-child) .accordion-item:last-child{border-bottom:var(--bs-accordion-border-width) solid var(--bs-accordion-border-color)}.bslib-sidebar-layout>.sidebar>.sidebar-content.has-accordion>.sidebar-title{border-bottom:none;padding-bottom:0}.bslib-sidebar-layout>.sidebar .shiny-input-container{width:100%}.bslib-sidebar-layout[data-bslib-sidebar-open=always]>.sidebar>.sidebar-content{padding-top:var(--bslib-sidebar-padding)}.bslib-sidebar-layout>.collapse-toggle{grid-row:1/2;grid-column:1/2;display:inline-flex;align-items:center;position:absolute;right:calc(var(--bslib-sidebar-icon-size));top:calc(var(--bslib-sidebar-icon-size, 1rem)/2);border:none;border-radius:var(--bslib-collapse-toggle-border-radius);height:var(--bslib-sidebar-icon-button-size, 2rem);width:var(--bslib-sidebar-icon-button-size, 2rem);display:flex;align-items:center;justify-content:center;padding:0;color:var(--bslib-sidebar-fg);background-color:unset;transition:color var(--bslib-sidebar-transition-easing-x) var(--bslib-sidebar-transition-duration),top var(--bslib-sidebar-transition-easing-x) var(--bslib-sidebar-transition-duration),right var(--bslib-sidebar-transition-easing-x) var(--bslib-sidebar-transition-duration),left var(--bslib-sidebar-transition-easing-x) var(--bslib-sidebar-transition-duration)}.bslib-sidebar-layout>.collapse-toggle:hover{background-color:var(--bslib-sidebar-toggle-bg)}.bslib-sidebar-layout>.collapse-toggle>.collapse-icon{opacity:.8;width:var(--bslib-sidebar-icon-size);height:var(--bslib-sidebar-icon-size);transform:rotateY(var(--bslib-collapse-toggle-transform));transition:transform var(--bslib-sidebar-toggle-transition-easing) var(--bslib-sidebar-transition-duration)}.bslib-sidebar-layout>.collapse-toggle:hover>.collapse-icon{opacity:1}.bslib-sidebar-layout .sidebar-title{font-size:1.25rem;line-height:1.25;margin-top:0;margin-bottom:1rem;padding-bottom:1rem;border-bottom:var(--bslib-sidebar-border)}.bslib-sidebar-layout.sidebar-right{grid-template-columns:var(--bslib-sidebar-column-main) min(100% - var(--bslib-sidebar-icon-size),var(--bslib-sidebar-width, 
250px))}.bslib-sidebar-layout.sidebar-right>.main{grid-column:1/2;border-top-right-radius:0;border-bottom-right-radius:0;border-top-left-radius:inherit;border-bottom-left-radius:inherit}.bslib-sidebar-layout.sidebar-right>.sidebar{grid-column:2/3;border-right:none;border-left:var(--bslib-sidebar-vert-border);border-top-left-radius:0;border-bottom-left-radius:0}.bslib-sidebar-layout.sidebar-right>.collapse-toggle{grid-column:2/3;left:var(--bslib-sidebar-icon-size);right:unset;border:var(--bslib-collapse-toggle-border)}.bslib-sidebar-layout.sidebar-right>.collapse-toggle>.collapse-icon{transform:rotateY(var(--bslib-collapse-toggle-right-transform))}.bslib-sidebar-layout.sidebar-collapsed{--bslib-collapse-toggle-transform: 180deg;--bslib-collapse-toggle-right-transform: 0deg;--bslib-sidebar-vert-border: none;grid-template-columns:0 minmax(0, 1fr)}.bslib-sidebar-layout.sidebar-collapsed.sidebar-right{grid-template-columns:minmax(0, 1fr) 0}.bslib-sidebar-layout.sidebar-collapsed:not(.transitioning)>.sidebar>*{display:none}.bslib-sidebar-layout.sidebar-collapsed>.main{border-radius:inherit}.bslib-sidebar-layout.sidebar-collapsed:not(.sidebar-right)>.main{padding-left:var(--bslib-sidebar-padding-icon)}.bslib-sidebar-layout.sidebar-collapsed.sidebar-right>.main{padding-right:var(--bslib-sidebar-padding-icon)}.bslib-sidebar-layout.sidebar-collapsed>.collapse-toggle{color:var(--bslib-sidebar-main-fg);top:calc(var(--bslib-sidebar-overlap-counter, 0)*(var(--bslib-sidebar-icon-size) + var(--bslib-sidebar-padding)) + var(--bslib-sidebar-icon-size, 1rem)/2);right:calc(-2.5*var(--bslib-sidebar-icon-size) - var(--bs-card-border-width, 1px))}.bslib-sidebar-layout.sidebar-collapsed.sidebar-right>.collapse-toggle{left:calc(-2.5*var(--bslib-sidebar-icon-size) - var(--bs-card-border-width, 1px));right:unset}@media(min-width: 576px){.bslib-sidebar-layout.transitioning>.sidebar>.sidebar-content{display:none}}@media(max-width: 575.98px){.bslib-sidebar-layout[data-bslib-sidebar-open=desktop]{--bslib-sidebar-js-init-collapsed: true}.bslib-sidebar-layout>.sidebar,.bslib-sidebar-layout.sidebar-right>.sidebar{border:none}.bslib-sidebar-layout>.main,.bslib-sidebar-layout.sidebar-right>.main{grid-column:1/3}.bslib-sidebar-layout[data-bslib-sidebar-open=always]{display:block !important}.bslib-sidebar-layout[data-bslib-sidebar-open=always]>.sidebar{max-height:var(--bslib-sidebar-max-height-mobile);overflow-y:auto;border-top:var(--bslib-sidebar-vert-border)}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]){grid-template-columns:100% 0}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]):not(.sidebar-collapsed)>.sidebar{z-index:1}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]):not(.sidebar-collapsed)>.collapse-toggle{z-index:1}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]).sidebar-right{grid-template-columns:0 100%}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]).sidebar-collapsed{grid-template-columns:0 100%}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]).sidebar-collapsed.sidebar-right{grid-template-columns:100% 0}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]):not(.sidebar-right)>.main{padding-left:var(--bslib-sidebar-padding-icon)}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]).sidebar-right>.main{padding-right:var(--bslib-sidebar-padding-icon)}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always])>.main{opacity:0;transition:opacity var(--bslib-sidebar-transition-easing-x) 
var(--bslib-sidebar-transition-duration)}.bslib-sidebar-layout:not([data-bslib-sidebar-open=always]).sidebar-collapsed>.main{opacity:1}}.html-fill-container{display:flex;flex-direction:column;min-height:0;min-width:0}.html-fill-container>.html-fill-item{flex:1 1 auto;min-height:0;min-width:0}.html-fill-container>:not(.html-fill-item){flex:0 0 auto}.quarto-container{min-height:calc(100vh - 132px)}body.hypothesis-enabled #quarto-header{margin-right:16px}footer.footer .nav-footer,#quarto-header>nav{padding-left:1em;padding-right:1em}footer.footer div.nav-footer p:first-child{margin-top:0}footer.footer div.nav-footer p:last-child{margin-bottom:0}#quarto-content>*{padding-top:14px}#quarto-content>#quarto-sidebar-glass{padding-top:0px}@media(max-width: 991.98px){#quarto-content>*{padding-top:0}#quarto-content .subtitle{padding-top:14px}#quarto-content section:first-of-type h2:first-of-type,#quarto-content section:first-of-type .h2:first-of-type{margin-top:1rem}}.headroom-target,header.headroom{will-change:transform;transition:position 200ms linear;transition:all 200ms linear}header.headroom--pinned{transform:translateY(0%)}header.headroom--unpinned{transform:translateY(-100%)}.navbar-container{width:100%}.navbar-brand{overflow:hidden;text-overflow:ellipsis}.navbar-brand-container{max-width:calc(100% - 115px);min-width:0;display:flex;align-items:center}@media(min-width: 992px){.navbar-brand-container{margin-right:1em}}.navbar-brand.navbar-brand-logo{margin-right:4px;display:inline-flex}.navbar-toggler{flex-basis:content;flex-shrink:0}.navbar .navbar-brand-container{order:2}.navbar .navbar-toggler{order:1}.navbar .navbar-container>.navbar-nav{order:20}.navbar .navbar-container>.navbar-brand-container{margin-left:0 !important;margin-right:0 !important}.navbar .navbar-collapse{order:20}.navbar #quarto-search{order:4;margin-left:auto}.navbar .navbar-toggler{margin-right:.5em}.navbar-logo{max-height:24px;width:auto;padding-right:4px}nav .nav-item:not(.compact){padding-top:1px}nav .nav-link i,nav .dropdown-item i{padding-right:1px}.navbar-expand-lg .navbar-nav .nav-link{padding-left:.6rem;padding-right:.6rem}nav .nav-item.compact .nav-link{padding-left:.5rem;padding-right:.5rem;font-size:1.1rem}.navbar .quarto-navbar-tools{order:3}.navbar .quarto-navbar-tools div.dropdown{display:inline-block}.navbar .quarto-navbar-tools .quarto-navigation-tool{color:#fdfeff}.navbar .quarto-navbar-tools .quarto-navigation-tool:hover{color:#fdfdff}.navbar-nav .dropdown-menu{min-width:220px;font-size:.9rem}.navbar .navbar-nav .nav-link.dropdown-toggle::after{opacity:.75;vertical-align:.175em}.navbar ul.dropdown-menu{padding-top:0;padding-bottom:0}.navbar .dropdown-header{text-transform:uppercase;font-size:.8rem;padding:0 .5rem}.navbar .dropdown-item{padding:.4rem .5rem}.navbar .dropdown-item>i.bi{margin-left:.1rem;margin-right:.25em}.sidebar #quarto-search{margin-top:-1px}.sidebar #quarto-search svg.aa-SubmitIcon{width:16px;height:16px}.sidebar-navigation a{color:inherit}.sidebar-title{margin-top:.25rem;padding-bottom:.5rem;font-size:1.3rem;line-height:1.6rem;visibility:visible}.sidebar-title>a{font-size:inherit;text-decoration:none}.sidebar-title .sidebar-tools-main{margin-top:-6px}@media(max-width: 991.98px){#quarto-sidebar div.sidebar-header{padding-top:.2em}}.sidebar-header-stacked .sidebar-title{margin-top:.6rem}.sidebar-logo{max-width:90%;padding-bottom:.5rem}.sidebar-logo-link{text-decoration:none}.sidebar-navigation li a{text-decoration:none}.sidebar-navigation 
.quarto-navigation-tool{opacity:.7;font-size:.875rem}#quarto-sidebar>nav>.sidebar-tools-main{margin-left:14px}.sidebar-tools-main{display:inline-flex;margin-left:0px;order:2}.sidebar-tools-main:not(.tools-wide){vertical-align:middle}.sidebar-navigation .quarto-navigation-tool.dropdown-toggle::after{display:none}.sidebar.sidebar-navigation>*{padding-top:1em}.sidebar-item{margin-bottom:.2em;line-height:1rem;margin-top:.4rem}.sidebar-section{padding-left:.5em;padding-bottom:.2em}.sidebar-item .sidebar-item-container{display:flex;justify-content:space-between;cursor:pointer}.sidebar-item-toggle:hover{cursor:pointer}.sidebar-item .sidebar-item-toggle .bi{font-size:.7rem;text-align:center}.sidebar-item .sidebar-item-toggle .bi-chevron-right::before{transition:transform 200ms ease}.sidebar-item .sidebar-item-toggle[aria-expanded=false] .bi-chevron-right::before{transform:none}.sidebar-item .sidebar-item-toggle[aria-expanded=true] .bi-chevron-right::before{transform:rotate(90deg)}.sidebar-item-text{width:100%}.sidebar-navigation .sidebar-divider{margin-left:0;margin-right:0;margin-top:.5rem;margin-bottom:.5rem}@media(max-width: 991.98px){.quarto-secondary-nav{display:block}.quarto-secondary-nav button.quarto-search-button{padding-right:0em;padding-left:2em}.quarto-secondary-nav button.quarto-btn-toggle{margin-left:-0.75rem;margin-right:.15rem}.quarto-secondary-nav nav.quarto-title-breadcrumbs{display:none}.quarto-secondary-nav nav.quarto-page-breadcrumbs{display:flex;align-items:center;padding-right:1em;margin-left:-0.25em}.quarto-secondary-nav nav.quarto-page-breadcrumbs a{text-decoration:none}.quarto-secondary-nav nav.quarto-page-breadcrumbs ol.breadcrumb{margin-bottom:0}}@media(min-width: 992px){.quarto-secondary-nav{display:none}}.quarto-title-breadcrumbs .breadcrumb{margin-bottom:.5em;font-size:.9rem}.quarto-title-breadcrumbs .breadcrumb li:last-of-type a{color:#6c757d}.quarto-secondary-nav .quarto-btn-toggle{color:#595959}.quarto-secondary-nav[aria-expanded=false] .quarto-btn-toggle .bi-chevron-right::before{transform:none}.quarto-secondary-nav[aria-expanded=true] .quarto-btn-toggle .bi-chevron-right::before{transform:rotate(90deg)}.quarto-secondary-nav .quarto-btn-toggle .bi-chevron-right::before{transition:transform 200ms ease}.quarto-secondary-nav{cursor:pointer}.no-decor{text-decoration:none}.quarto-secondary-nav-title{margin-top:.3em;color:#595959;padding-top:4px}.quarto-secondary-nav nav.quarto-page-breadcrumbs{color:#595959}.quarto-secondary-nav nav.quarto-page-breadcrumbs a{color:#595959}.quarto-secondary-nav nav.quarto-page-breadcrumbs a:hover{color:rgba(33,81,191,.8)}.quarto-secondary-nav nav.quarto-page-breadcrumbs .breadcrumb-item::before{color:#8c8c8c}.breadcrumb-item{line-height:1.2rem}div.sidebar-item-container{color:#595959}div.sidebar-item-container:hover,div.sidebar-item-container:focus{color:rgba(33,81,191,.8)}div.sidebar-item-container.disabled{color:rgba(89,89,89,.75)}div.sidebar-item-container .active,div.sidebar-item-container .show>.nav-link,div.sidebar-item-container .sidebar-link>code{color:#2151bf}div.sidebar.sidebar-navigation.rollup.quarto-sidebar-toggle-contents,nav.sidebar.sidebar-navigation:not(.rollup){background-color:#fff}@media(max-width: 991.98px){.sidebar-navigation .sidebar-item a,.nav-page .nav-page-text,.sidebar-navigation{font-size:1rem}.sidebar-navigation ul.sidebar-section.depth1 .sidebar-section-item{font-size:1.1rem}.sidebar-logo{display:none}.sidebar.sidebar-navigation{position:static;border-bottom:1px solid 
#dee2e6}.sidebar.sidebar-navigation.collapsing{position:fixed;z-index:1000}.sidebar.sidebar-navigation.show{position:fixed;z-index:1000}.sidebar.sidebar-navigation{min-height:100%}nav.quarto-secondary-nav{background-color:#fff;border-bottom:1px solid #dee2e6}.quarto-banner nav.quarto-secondary-nav{background-color:#2780e3;color:#fdfeff;border-top:1px solid #dee2e6}.sidebar .sidebar-footer{visibility:visible;padding-top:1rem;position:inherit}.sidebar-tools-collapse{display:block}}#quarto-sidebar{transition:width .15s ease-in}#quarto-sidebar>*{padding-right:1em}@media(max-width: 991.98px){#quarto-sidebar .sidebar-menu-container{white-space:nowrap;min-width:225px}#quarto-sidebar.show{transition:width .15s ease-out}}@media(min-width: 992px){#quarto-sidebar{display:flex;flex-direction:column}.nav-page .nav-page-text,.sidebar-navigation .sidebar-section .sidebar-item{font-size:.875rem}.sidebar-navigation .sidebar-item{font-size:.925rem}.sidebar.sidebar-navigation{display:block;position:sticky}.sidebar-search{width:100%}.sidebar .sidebar-footer{visibility:visible}}@media(max-width: 991.98px){#quarto-sidebar-glass{position:fixed;top:0;bottom:0;left:0;right:0;background-color:rgba(255,255,255,0);transition:background-color .15s ease-in;z-index:-1}#quarto-sidebar-glass.collapsing{z-index:1000}#quarto-sidebar-glass.show{transition:background-color .15s ease-out;background-color:rgba(102,102,102,.4);z-index:1000}}.sidebar .sidebar-footer{padding:.5rem 1rem;align-self:flex-end;color:#6c757d;width:100%}.quarto-page-breadcrumbs .breadcrumb-item+.breadcrumb-item,.quarto-page-breadcrumbs .breadcrumb-item{padding-right:.33em;padding-left:0}.quarto-page-breadcrumbs .breadcrumb-item::before{padding-right:.33em}.quarto-sidebar-footer{font-size:.875em}.sidebar-section .bi-chevron-right{vertical-align:middle}.sidebar-section .bi-chevron-right::before{font-size:.9em}.notransition{-webkit-transition:none !important;-moz-transition:none !important;-o-transition:none !important;transition:none !important}.btn:focus:not(:focus-visible){box-shadow:none}.page-navigation{display:flex;justify-content:space-between}.nav-page{padding-bottom:.75em}.nav-page .bi{font-size:1.8rem;vertical-align:middle}.nav-page .nav-page-text{padding-left:.25em;padding-right:.25em}.nav-page a{color:#6c757d;text-decoration:none;display:flex;align-items:center}.nav-page a:hover{color:#1f4eb6}.nav-footer .toc-actions{padding-bottom:.5em;padding-top:.5em}.nav-footer .toc-actions a,.nav-footer .toc-actions a:hover{text-decoration:none}.nav-footer .toc-actions ul{display:flex;list-style:none}.nav-footer .toc-actions ul :first-child{margin-left:auto}.nav-footer .toc-actions ul :last-child{margin-right:auto}.nav-footer .toc-actions ul li{padding-right:1.5em}.nav-footer .toc-actions ul li i.bi{padding-right:.4em}.nav-footer .toc-actions ul li:last-of-type{padding-right:0}.nav-footer{display:flex;flex-direction:row;flex-wrap:wrap;justify-content:space-between;align-items:baseline;text-align:center;padding-top:.5rem;padding-bottom:.5rem;background-color:#fff}body.nav-fixed{padding-top:64px}.nav-footer-contents{color:#6c757d;margin-top:.25rem}.nav-footer{min-height:3.5em;color:#757575}.nav-footer a{color:#757575}.nav-footer .nav-footer-left{font-size:.825em}.nav-footer .nav-footer-center{font-size:.825em}.nav-footer .nav-footer-right{font-size:.825em}.nav-footer-left .footer-items,.nav-footer-center .footer-items,.nav-footer-right .footer-items{display:inline-flex;padding-top:.3em;padding-bottom:.3em;margin-bottom:0em}.nav-footer-left .footer-items 
.nav-link,.nav-footer-center .footer-items .nav-link,.nav-footer-right .footer-items .nav-link{padding-left:.6em;padding-right:.6em}.nav-footer-left{flex:1 1 0px;text-align:left}.nav-footer-right{flex:1 1 0px;text-align:right}.nav-footer-center{flex:1 1 0px;min-height:3em;text-align:center}.nav-footer-center .footer-items{justify-content:center}@media(max-width: 767.98px){.nav-footer-center{margin-top:3em}}.navbar .quarto-reader-toggle.reader .quarto-reader-toggle-btn{background-color:#fdfeff;border-radius:3px}@media(max-width: 991.98px){.quarto-reader-toggle{display:none}}.quarto-reader-toggle.reader.quarto-navigation-tool .quarto-reader-toggle-btn{background-color:#595959;border-radius:3px}.quarto-reader-toggle .quarto-reader-toggle-btn{display:inline-flex;padding-left:.2em;padding-right:.2em;margin-left:-0.2em;margin-right:-0.2em;text-align:center}.navbar .quarto-reader-toggle:not(.reader) .bi::before{background-image:url('data:image/svg+xml,')}.navbar .quarto-reader-toggle.reader .bi::before{background-image:url('data:image/svg+xml,')}.sidebar-navigation .quarto-reader-toggle:not(.reader) .bi::before{background-image:url('data:image/svg+xml,')}.sidebar-navigation .quarto-reader-toggle.reader .bi::before{background-image:url('data:image/svg+xml,')}#quarto-back-to-top{display:none;position:fixed;bottom:50px;background-color:#fff;border-radius:.25rem;box-shadow:0 .2rem .5rem #6c757d,0 0 .05rem #6c757d;color:#6c757d;text-decoration:none;font-size:.9em;text-align:center;left:50%;padding:.4rem .8rem;transform:translate(-50%, 0)}.aa-DetachedSearchButtonQuery{display:none}.aa-DetachedOverlay ul.aa-List,#quarto-search-results ul.aa-List{list-style:none;padding-left:0}.aa-DetachedOverlay .aa-Panel,#quarto-search-results .aa-Panel{background-color:#fff;position:absolute;z-index:2000}#quarto-search-results .aa-Panel{max-width:400px}#quarto-search input{font-size:.925rem}@media(min-width: 992px){.navbar #quarto-search{margin-left:.25rem;order:999}}.navbar.navbar-expand-sm #quarto-search,.navbar.navbar-expand-md #quarto-search{order:999}@media(min-width: 992px){.navbar .quarto-navbar-tools{margin-left:auto;order:900}}@media(max-width: 991.98px){#quarto-sidebar .sidebar-search{display:none}}#quarto-sidebar .sidebar-search .aa-Autocomplete{width:100%}.navbar .aa-Autocomplete .aa-Form{width:180px}.navbar #quarto-search.type-overlay .aa-Autocomplete{width:40px}.navbar #quarto-search.type-overlay .aa-Autocomplete .aa-Form{background-color:inherit;border:none}.navbar #quarto-search.type-overlay .aa-Autocomplete .aa-Form:focus-within{box-shadow:none;outline:none}.navbar #quarto-search.type-overlay .aa-Autocomplete .aa-Form .aa-InputWrapper{display:none}.navbar #quarto-search.type-overlay .aa-Autocomplete .aa-Form .aa-InputWrapper:focus-within{display:inherit}.navbar #quarto-search.type-overlay .aa-Autocomplete .aa-Form .aa-Label svg,.navbar #quarto-search.type-overlay .aa-Autocomplete .aa-Form .aa-LoadingIndicator svg{width:26px;height:26px;color:#fdfeff;opacity:1}.navbar #quarto-search.type-overlay .aa-Autocomplete svg.aa-SubmitIcon{width:26px;height:26px;color:#fdfeff;opacity:1}.aa-Autocomplete .aa-Form,.aa-DetachedFormContainer .aa-Form{align-items:center;background-color:#fff;border:1px solid #dee2e6;border-radius:.25rem;color:#343a40;display:flex;line-height:1em;margin:0;position:relative;width:100%}.aa-Autocomplete .aa-Form:focus-within,.aa-DetachedFormContainer .aa-Form:focus-within{box-shadow:rgba(39,128,227,.6) 0 0 0 1px;outline:currentColor none medium}.aa-Autocomplete .aa-Form 
.aa-InputWrapperPrefix,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperPrefix{align-items:center;display:flex;flex-shrink:0;order:1}.aa-Autocomplete .aa-Form .aa-InputWrapperPrefix .aa-Label,.aa-Autocomplete .aa-Form .aa-InputWrapperPrefix .aa-LoadingIndicator,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperPrefix .aa-Label,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperPrefix .aa-LoadingIndicator{cursor:initial;flex-shrink:0;padding:0;text-align:left}.aa-Autocomplete .aa-Form .aa-InputWrapperPrefix .aa-Label svg,.aa-Autocomplete .aa-Form .aa-InputWrapperPrefix .aa-LoadingIndicator svg,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperPrefix .aa-Label svg,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperPrefix .aa-LoadingIndicator svg{color:#343a40;opacity:.5}.aa-Autocomplete .aa-Form .aa-InputWrapperPrefix .aa-SubmitButton,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperPrefix .aa-SubmitButton{appearance:none;background:none;border:0;margin:0}.aa-Autocomplete .aa-Form .aa-InputWrapperPrefix .aa-LoadingIndicator,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperPrefix .aa-LoadingIndicator{align-items:center;display:flex;justify-content:center}.aa-Autocomplete .aa-Form .aa-InputWrapperPrefix .aa-LoadingIndicator[hidden],.aa-DetachedFormContainer .aa-Form .aa-InputWrapperPrefix .aa-LoadingIndicator[hidden]{display:none}.aa-Autocomplete .aa-Form .aa-InputWrapper,.aa-DetachedFormContainer .aa-Form .aa-InputWrapper{order:3;position:relative;width:100%}.aa-Autocomplete .aa-Form .aa-InputWrapper .aa-Input,.aa-DetachedFormContainer .aa-Form .aa-InputWrapper .aa-Input{appearance:none;background:none;border:0;color:#343a40;font:inherit;height:calc(1.5em + .1rem + 2px);padding:0;width:100%}.aa-Autocomplete .aa-Form .aa-InputWrapper .aa-Input::placeholder,.aa-DetachedFormContainer .aa-Form .aa-InputWrapper .aa-Input::placeholder{color:#343a40;opacity:.8}.aa-Autocomplete .aa-Form .aa-InputWrapper .aa-Input:focus,.aa-DetachedFormContainer .aa-Form .aa-InputWrapper .aa-Input:focus{border-color:none;box-shadow:none;outline:none}.aa-Autocomplete .aa-Form .aa-InputWrapper .aa-Input::-webkit-search-decoration,.aa-Autocomplete .aa-Form .aa-InputWrapper .aa-Input::-webkit-search-cancel-button,.aa-Autocomplete .aa-Form .aa-InputWrapper .aa-Input::-webkit-search-results-button,.aa-Autocomplete .aa-Form .aa-InputWrapper .aa-Input::-webkit-search-results-decoration,.aa-DetachedFormContainer .aa-Form .aa-InputWrapper .aa-Input::-webkit-search-decoration,.aa-DetachedFormContainer .aa-Form .aa-InputWrapper .aa-Input::-webkit-search-cancel-button,.aa-DetachedFormContainer .aa-Form .aa-InputWrapper .aa-Input::-webkit-search-results-button,.aa-DetachedFormContainer .aa-Form .aa-InputWrapper .aa-Input::-webkit-search-results-decoration{display:none}.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix{align-items:center;display:flex;order:4}.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix .aa-ClearButton,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-ClearButton{align-items:center;background:none;border:0;color:#343a40;opacity:.8;cursor:pointer;display:flex;margin:0;width:calc(1.5em + .1rem + 2px)}.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix .aa-ClearButton:hover,.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix .aa-ClearButton:focus,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-ClearButton:hover,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-ClearButton:focus{color:#343a40;opacity:.8}.aa-Autocomplete 
.aa-Form .aa-InputWrapperSuffix .aa-ClearButton[hidden],.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-ClearButton[hidden]{display:none}.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix .aa-ClearButton svg,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-ClearButton svg{width:calc(1.5em + 0.75rem + calc(1px * 2))}.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix .aa-CopyButton,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-CopyButton{border:none;align-items:center;background:none;color:#343a40;opacity:.4;font-size:.7rem;cursor:pointer;display:none;margin:0;width:calc(1em + .1rem + 2px)}.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix .aa-CopyButton:hover,.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix .aa-CopyButton:focus,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-CopyButton:hover,.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-CopyButton:focus{color:#343a40;opacity:.8}.aa-Autocomplete .aa-Form .aa-InputWrapperSuffix .aa-CopyButton[hidden],.aa-DetachedFormContainer .aa-Form .aa-InputWrapperSuffix .aa-CopyButton[hidden]{display:none}.aa-PanelLayout:empty{display:none}.quarto-search-no-results.no-query{display:none}.aa-Source:has(.no-query){display:none}#quarto-search-results .aa-Panel{border:solid #dee2e6 1px}#quarto-search-results .aa-SourceNoResults{width:398px}.aa-DetachedOverlay .aa-Panel,#quarto-search-results .aa-Panel{max-height:65vh;overflow-y:auto;font-size:.925rem}.aa-DetachedOverlay .aa-SourceNoResults,#quarto-search-results .aa-SourceNoResults{height:60px;display:flex;justify-content:center;align-items:center}.aa-DetachedOverlay .search-error,#quarto-search-results .search-error{padding-top:10px;padding-left:20px;padding-right:20px;cursor:default}.aa-DetachedOverlay .search-error .search-error-title,#quarto-search-results .search-error .search-error-title{font-size:1.1rem;margin-bottom:.5rem}.aa-DetachedOverlay .search-error .search-error-title .search-error-icon,#quarto-search-results .search-error .search-error-title .search-error-icon{margin-right:8px}.aa-DetachedOverlay .search-error .search-error-text,#quarto-search-results .search-error .search-error-text{font-weight:300}.aa-DetachedOverlay .search-result-text,#quarto-search-results .search-result-text{font-weight:300;overflow:hidden;text-overflow:ellipsis;display:-webkit-box;-webkit-line-clamp:2;-webkit-box-orient:vertical;line-height:1.2rem;max-height:2.4rem}.aa-DetachedOverlay .aa-SourceHeader .search-result-header,#quarto-search-results .aa-SourceHeader .search-result-header{font-size:.875rem;background-color:#f2f2f2;padding-left:14px;padding-bottom:4px;padding-top:4px}.aa-DetachedOverlay .aa-SourceHeader .search-result-header-no-results,#quarto-search-results .aa-SourceHeader .search-result-header-no-results{display:none}.aa-DetachedOverlay .aa-SourceFooter .algolia-search-logo,#quarto-search-results .aa-SourceFooter .algolia-search-logo{width:110px;opacity:.85;margin:8px;float:right}.aa-DetachedOverlay .search-result-section,#quarto-search-results .search-result-section{font-size:.925em}.aa-DetachedOverlay a.search-result-link,#quarto-search-results a.search-result-link{color:inherit;text-decoration:none}.aa-DetachedOverlay li.aa-Item[aria-selected=true] .search-item,#quarto-search-results li.aa-Item[aria-selected=true] .search-item{background-color:#2780e3}.aa-DetachedOverlay li.aa-Item[aria-selected=true] .search-item.search-result-more,.aa-DetachedOverlay li.aa-Item[aria-selected=true] .search-item .search-result-section,.aa-DetachedOverlay 
li.aa-Item[aria-selected=true] .search-item .search-result-text,.aa-DetachedOverlay li.aa-Item[aria-selected=true] .search-item .search-result-title-container,.aa-DetachedOverlay li.aa-Item[aria-selected=true] .search-item .search-result-text-container,#quarto-search-results li.aa-Item[aria-selected=true] .search-item.search-result-more,#quarto-search-results li.aa-Item[aria-selected=true] .search-item .search-result-section,#quarto-search-results li.aa-Item[aria-selected=true] .search-item .search-result-text,#quarto-search-results li.aa-Item[aria-selected=true] .search-item .search-result-title-container,#quarto-search-results li.aa-Item[aria-selected=true] .search-item .search-result-text-container{color:#fff;background-color:#2780e3}.aa-DetachedOverlay li.aa-Item[aria-selected=true] .search-item mark.search-match,.aa-DetachedOverlay li.aa-Item[aria-selected=true] .search-item .search-match.mark,#quarto-search-results li.aa-Item[aria-selected=true] .search-item mark.search-match,#quarto-search-results li.aa-Item[aria-selected=true] .search-item .search-match.mark{color:#fff;background-color:#4b95e8}.aa-DetachedOverlay li.aa-Item[aria-selected=false] .search-item,#quarto-search-results li.aa-Item[aria-selected=false] .search-item{background-color:#fff}.aa-DetachedOverlay li.aa-Item[aria-selected=false] .search-item.search-result-more,.aa-DetachedOverlay li.aa-Item[aria-selected=false] .search-item .search-result-section,.aa-DetachedOverlay li.aa-Item[aria-selected=false] .search-item .search-result-text,.aa-DetachedOverlay li.aa-Item[aria-selected=false] .search-item .search-result-title-container,.aa-DetachedOverlay li.aa-Item[aria-selected=false] .search-item .search-result-text-container,#quarto-search-results li.aa-Item[aria-selected=false] .search-item.search-result-more,#quarto-search-results li.aa-Item[aria-selected=false] .search-item .search-result-section,#quarto-search-results li.aa-Item[aria-selected=false] .search-item .search-result-text,#quarto-search-results li.aa-Item[aria-selected=false] .search-item .search-result-title-container,#quarto-search-results li.aa-Item[aria-selected=false] .search-item .search-result-text-container{color:#343a40}.aa-DetachedOverlay li.aa-Item[aria-selected=false] .search-item mark.search-match,.aa-DetachedOverlay li.aa-Item[aria-selected=false] .search-item .search-match.mark,#quarto-search-results li.aa-Item[aria-selected=false] .search-item mark.search-match,#quarto-search-results li.aa-Item[aria-selected=false] .search-item .search-match.mark{color:inherit;background-color:#e5effc}.aa-DetachedOverlay .aa-Item .search-result-doc:not(.document-selectable) .search-result-title-container,#quarto-search-results .aa-Item .search-result-doc:not(.document-selectable) .search-result-title-container{background-color:#fff;color:#343a40}.aa-DetachedOverlay .aa-Item .search-result-doc:not(.document-selectable) .search-result-text-container,#quarto-search-results .aa-Item .search-result-doc:not(.document-selectable) .search-result-text-container{padding-top:0px}.aa-DetachedOverlay li.aa-Item .search-result-doc.document-selectable .search-result-text-container,#quarto-search-results li.aa-Item .search-result-doc.document-selectable .search-result-text-container{margin-top:-4px}.aa-DetachedOverlay .aa-Item,#quarto-search-results .aa-Item{cursor:pointer}.aa-DetachedOverlay .aa-Item .search-item,#quarto-search-results .aa-Item 
.search-item{border-left:none;border-right:none;border-top:none;background-color:#fff;border-color:#dee2e6;color:#343a40}.aa-DetachedOverlay .aa-Item .search-item p,#quarto-search-results .aa-Item .search-item p{margin-top:0;margin-bottom:0}.aa-DetachedOverlay .aa-Item .search-item i.bi,#quarto-search-results .aa-Item .search-item i.bi{padding-left:8px;padding-right:8px;font-size:1.3em}.aa-DetachedOverlay .aa-Item .search-item .search-result-title,#quarto-search-results .aa-Item .search-item .search-result-title{margin-top:.3em;margin-bottom:0em}.aa-DetachedOverlay .aa-Item .search-item .search-result-crumbs,#quarto-search-results .aa-Item .search-item .search-result-crumbs{white-space:nowrap;text-overflow:ellipsis;font-size:.8em;font-weight:300;margin-right:1em}.aa-DetachedOverlay .aa-Item .search-item .search-result-crumbs:not(.search-result-crumbs-wrap),#quarto-search-results .aa-Item .search-item .search-result-crumbs:not(.search-result-crumbs-wrap){max-width:30%;margin-left:auto;margin-top:.5em;margin-bottom:.1rem}.aa-DetachedOverlay .aa-Item .search-item .search-result-crumbs.search-result-crumbs-wrap,#quarto-search-results .aa-Item .search-item .search-result-crumbs.search-result-crumbs-wrap{flex-basis:100%;margin-top:0em;margin-bottom:.2em;margin-left:37px}.aa-DetachedOverlay .aa-Item .search-result-title-container,#quarto-search-results .aa-Item .search-result-title-container{font-size:1em;display:flex;flex-wrap:wrap;padding:6px 4px 6px 4px}.aa-DetachedOverlay .aa-Item .search-result-text-container,#quarto-search-results .aa-Item .search-result-text-container{padding-bottom:8px;padding-right:8px;margin-left:42px}.aa-DetachedOverlay .aa-Item .search-result-doc-section,.aa-DetachedOverlay .aa-Item .search-result-more,#quarto-search-results .aa-Item .search-result-doc-section,#quarto-search-results .aa-Item .search-result-more{padding-top:8px;padding-bottom:8px;padding-left:44px}.aa-DetachedOverlay .aa-Item .search-result-more,#quarto-search-results .aa-Item .search-result-more{font-size:.8em;font-weight:400}.aa-DetachedOverlay .aa-Item .search-result-doc,#quarto-search-results .aa-Item .search-result-doc{border-top:1px solid #dee2e6}.aa-DetachedSearchButton{background:none;border:none}.aa-DetachedSearchButton .aa-DetachedSearchButtonPlaceholder{display:none}.navbar .aa-DetachedSearchButton .aa-DetachedSearchButtonIcon{color:#fdfeff}.sidebar-tools-collapse #quarto-search,.sidebar-tools-main #quarto-search{display:inline}.sidebar-tools-collapse #quarto-search .aa-Autocomplete,.sidebar-tools-main #quarto-search .aa-Autocomplete{display:inline}.sidebar-tools-collapse #quarto-search .aa-DetachedSearchButton,.sidebar-tools-main #quarto-search .aa-DetachedSearchButton{padding-left:4px;padding-right:4px}.sidebar-tools-collapse #quarto-search .aa-DetachedSearchButton .aa-DetachedSearchButtonIcon,.sidebar-tools-main #quarto-search .aa-DetachedSearchButton .aa-DetachedSearchButtonIcon{color:#595959}.sidebar-tools-collapse #quarto-search .aa-DetachedSearchButton .aa-DetachedSearchButtonIcon .aa-SubmitIcon,.sidebar-tools-main #quarto-search .aa-DetachedSearchButton .aa-DetachedSearchButtonIcon .aa-SubmitIcon{margin-top:-3px}.aa-DetachedContainer{background:rgba(255,255,255,.65);width:90%;bottom:0;box-shadow:rgba(222,226,230,.6) 0 0 0 1px;outline:currentColor none medium;display:flex;flex-direction:column;left:0;margin:0;overflow:hidden;padding:0;position:fixed;right:0;top:0;z-index:1101}.aa-DetachedContainer::after{height:32px}.aa-DetachedContainer .aa-SourceHeader{margin:var(--aa-spacing-half) 
0 var(--aa-spacing-half) 2px}.aa-DetachedContainer .aa-Panel{background-color:#fff;border-radius:0;box-shadow:none;flex-grow:1;margin:0;padding:0;position:relative}.aa-DetachedContainer .aa-PanelLayout{bottom:0;box-shadow:none;left:0;margin:0;max-height:none;overflow-y:auto;position:absolute;right:0;top:0;width:100%}.aa-DetachedFormContainer{background-color:#fff;border-bottom:1px solid #dee2e6;display:flex;flex-direction:row;justify-content:space-between;margin:0;padding:.5em}.aa-DetachedCancelButton{background:none;font-size:.8em;border:0;border-radius:3px;color:#343a40;cursor:pointer;margin:0 0 0 .5em;padding:0 .5em}.aa-DetachedCancelButton:hover,.aa-DetachedCancelButton:focus{box-shadow:rgba(39,128,227,.6) 0 0 0 1px;outline:currentColor none medium}.aa-DetachedContainer--modal{bottom:inherit;height:auto;margin:0 auto;position:absolute;top:100px;border-radius:6px;max-width:850px}@media(max-width: 575.98px){.aa-DetachedContainer--modal{width:100%;top:0px;border-radius:0px;border:none}}.aa-DetachedContainer--modal .aa-PanelLayout{max-height:var(--aa-detached-modal-max-height);padding-bottom:var(--aa-spacing-half);position:static}.aa-Detached{height:100vh;overflow:hidden}.aa-DetachedOverlay{background-color:rgba(52,58,64,.4);position:fixed;left:0;right:0;top:0;margin:0;padding:0;height:100vh;z-index:1100}.quarto-dashboard.nav-fixed.dashboard-sidebar #quarto-content.quarto-dashboard-content{padding:0em}.quarto-dashboard #quarto-content.quarto-dashboard-content{padding:1em}.quarto-dashboard #quarto-content.quarto-dashboard-content>*{padding-top:0}@media(min-width: 576px){.quarto-dashboard{height:100%}}.quarto-dashboard .card.valuebox.bslib-card.bg-primary{background-color:#5397e9 !important}.quarto-dashboard .card.valuebox.bslib-card.bg-secondary{background-color:#343a40 !important}.quarto-dashboard .card.valuebox.bslib-card.bg-success{background-color:#3aa716 !important}.quarto-dashboard .card.valuebox.bslib-card.bg-info{background-color:rgba(153,84,187,.7019607843) !important}.quarto-dashboard .card.valuebox.bslib-card.bg-warning{background-color:#fa6400 !important}.quarto-dashboard .card.valuebox.bslib-card.bg-danger{background-color:rgba(255,0,57,.7019607843) !important}.quarto-dashboard .card.valuebox.bslib-card.bg-light{background-color:#f8f9fa !important}.quarto-dashboard .card.valuebox.bslib-card.bg-dark{background-color:#343a40 !important}.quarto-dashboard.dashboard-fill{display:flex;flex-direction:column}.quarto-dashboard #quarto-appendix{display:none}.quarto-dashboard #quarto-header #quarto-dashboard-header{border-top:solid 1px #549be9;border-bottom:solid 1px #549be9}.quarto-dashboard #quarto-header #quarto-dashboard-header>nav{padding-left:1em;padding-right:1em}.quarto-dashboard #quarto-header #quarto-dashboard-header>nav .navbar-brand-container{padding-left:0}.quarto-dashboard #quarto-header #quarto-dashboard-header .navbar-toggler{margin-right:0}.quarto-dashboard #quarto-header #quarto-dashboard-header .navbar-toggler-icon{height:1em;width:1em;background-image:url('data:image/svg+xml,')}.quarto-dashboard #quarto-header #quarto-dashboard-header .navbar-brand-container{padding-right:1em}.quarto-dashboard #quarto-header #quarto-dashboard-header .navbar-title{font-size:1.1em}.quarto-dashboard #quarto-header #quarto-dashboard-header .navbar-nav{font-size:.9em}.quarto-dashboard #quarto-dashboard-header .navbar{padding:0}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-container{padding-left:1em}.quarto-dashboard #quarto-dashboard-header .navbar.slim .navbar-brand-container 
.nav-link,.quarto-dashboard #quarto-dashboard-header .navbar.slim .navbar-nav .nav-link{padding:.7em}.quarto-dashboard #quarto-dashboard-header .navbar .quarto-color-scheme-toggle{order:9}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-toggler{margin-left:.5em;order:10}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-nav .nav-link{padding:.5em;height:100%;display:flex;align-items:center}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-nav .active{background-color:#4b95e8}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-brand-container{padding:.5em .5em .5em 0;display:flex;flex-direction:row;margin-right:2em;align-items:center}@media(max-width: 767.98px){.quarto-dashboard #quarto-dashboard-header .navbar .navbar-brand-container{margin-right:auto}}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-collapse{align-self:stretch}@media(min-width: 768px){.quarto-dashboard #quarto-dashboard-header .navbar .navbar-collapse{order:8}}@media(max-width: 767.98px){.quarto-dashboard #quarto-dashboard-header .navbar .navbar-collapse{order:1000;padding-bottom:.5em}}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-collapse .navbar-nav{align-self:stretch}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-title{font-size:1.25em;line-height:1.1em;display:flex;flex-direction:row;flex-wrap:wrap;align-items:baseline}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-title .navbar-title-text{margin-right:.4em}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-title a{text-decoration:none;color:inherit}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-subtitle,.quarto-dashboard #quarto-dashboard-header .navbar .navbar-author{font-size:.9rem;margin-right:.5em}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-author{margin-left:auto}.quarto-dashboard #quarto-dashboard-header .navbar .navbar-logo{max-height:48px;min-height:30px;object-fit:cover;margin-right:1em}.quarto-dashboard #quarto-dashboard-header .navbar .quarto-dashboard-links{order:9;padding-right:1em}.quarto-dashboard #quarto-dashboard-header .navbar .quarto-dashboard-link-text{margin-left:.25em}.quarto-dashboard #quarto-dashboard-header .navbar .quarto-dashboard-link{padding-right:0em;padding-left:.7em;text-decoration:none;color:#fdfeff}.quarto-dashboard .page-layout-custom .tab-content{padding:0;border:none}.quarto-dashboard-img-contain{height:100%;width:100%;object-fit:contain}@media(max-width: 575.98px){.quarto-dashboard .bslib-grid{grid-template-rows:minmax(1em, max-content) !important}.quarto-dashboard .sidebar-content{height:inherit}.quarto-dashboard .page-layout-custom{min-height:100vh}}.quarto-dashboard.dashboard-toolbar>.page-layout-custom,.quarto-dashboard.dashboard-sidebar>.page-layout-custom{padding:0}.quarto-dashboard .quarto-dashboard-content.quarto-dashboard-pages{padding:0}.quarto-dashboard .callout{margin-bottom:0;margin-top:0}.quarto-dashboard .html-fill-container figure{overflow:hidden}.quarto-dashboard bslib-tooltip .rounded-pill{border:solid #6c757d 1px}.quarto-dashboard bslib-tooltip .rounded-pill .svg{fill:#343a40}.quarto-dashboard .tabset .dashboard-card-no-title .nav-tabs{margin-left:0;margin-right:auto}.quarto-dashboard .tabset .tab-content{border:none}.quarto-dashboard .tabset .card-header .nav-link[role=tab]{margin-top:-6px;padding-top:6px;padding-bottom:6px}.quarto-dashboard .card.valuebox,.quarto-dashboard .card.bslib-value-box{min-height:3rem}.quarto-dashboard .card.valuebox .card-body,.quarto-dashboard .card.bslib-value-box 
.card-body{padding:0}.quarto-dashboard .bslib-value-box .value-box-value{font-size:clamp(.1em,15cqw,5em)}.quarto-dashboard .bslib-value-box .value-box-showcase .bi{font-size:clamp(.1em,max(18cqw,5.2cqh),5em);text-align:center;height:1em}.quarto-dashboard .bslib-value-box .value-box-showcase .bi::before{vertical-align:1em}.quarto-dashboard .bslib-value-box .value-box-area{margin-top:auto;margin-bottom:auto}.quarto-dashboard .card figure.quarto-float{display:flex;flex-direction:column;align-items:center}.quarto-dashboard .dashboard-scrolling{padding:1em}.quarto-dashboard .full-height{height:100%}.quarto-dashboard .showcase-bottom .value-box-grid{display:grid;grid-template-columns:1fr;grid-template-rows:1fr auto;grid-template-areas:"top" "bottom"}.quarto-dashboard .showcase-bottom .value-box-grid .value-box-showcase{grid-area:bottom;padding:0;margin:0}.quarto-dashboard .showcase-bottom .value-box-grid .value-box-showcase i.bi{font-size:4rem}.quarto-dashboard .showcase-bottom .value-box-grid .value-box-area{grid-area:top}.quarto-dashboard .tab-content{margin-bottom:0}.quarto-dashboard .bslib-card .bslib-navs-card-title{justify-content:stretch;align-items:end}.quarto-dashboard .card-header{display:flex;flex-wrap:wrap;justify-content:space-between}.quarto-dashboard .card-header .card-title{display:flex;flex-direction:column;justify-content:center;margin-bottom:0}.quarto-dashboard .tabset .card-toolbar{margin-bottom:1em}.quarto-dashboard .bslib-grid>.bslib-sidebar-layout{border:none;gap:var(--bslib-spacer, 1rem)}.quarto-dashboard .bslib-grid>.bslib-sidebar-layout>.main{padding:0}.quarto-dashboard .bslib-grid>.bslib-sidebar-layout>.sidebar{border-radius:.25rem;border:1px solid rgba(0,0,0,.175)}.quarto-dashboard .bslib-grid>.bslib-sidebar-layout>.collapse-toggle{display:none}@media(max-width: 767.98px){.quarto-dashboard .bslib-grid>.bslib-sidebar-layout{grid-template-columns:1fr;grid-template-rows:max-content 1fr}.quarto-dashboard .bslib-grid>.bslib-sidebar-layout>.main{grid-column:1;grid-row:2}.quarto-dashboard .bslib-grid>.bslib-sidebar-layout .sidebar{grid-column:1;grid-row:1}}.quarto-dashboard .sidebar-right .sidebar{padding-left:2.5em}.quarto-dashboard .sidebar-right .collapse-toggle{left:2px}.quarto-dashboard .quarto-dashboard .sidebar-right button.collapse-toggle:not(.transitioning){left:unset}.quarto-dashboard aside.sidebar{padding-left:1em;padding-right:1em;background-color:rgba(52,58,64,.25);color:#343a40}.quarto-dashboard .bslib-sidebar-layout>div.main{padding:.7em}.quarto-dashboard .bslib-sidebar-layout button.collapse-toggle{margin-top:.3em}.quarto-dashboard .bslib-sidebar-layout .collapse-toggle{top:0}.quarto-dashboard .bslib-sidebar-layout.sidebar-collapsed:not(.transitioning):not(.sidebar-right) .collapse-toggle{left:2px}.quarto-dashboard .sidebar>section>.h3:first-of-type{margin-top:0em}.quarto-dashboard .sidebar .h3,.quarto-dashboard .sidebar .h4,.quarto-dashboard .sidebar .h5,.quarto-dashboard .sidebar .h6{margin-top:.5em}.quarto-dashboard .sidebar form{flex-direction:column;align-items:start;margin-bottom:1em}.quarto-dashboard .sidebar form div[class*=oi-][class$=-input]{flex-direction:column}.quarto-dashboard .sidebar form[class*=oi-][class$=-toggle]{flex-direction:row-reverse;align-items:center;justify-content:start}.quarto-dashboard .sidebar form input[type=range]{margin-top:.5em;margin-right:.8em;margin-left:1em}.quarto-dashboard .sidebar label{width:fit-content}.quarto-dashboard .sidebar .card-body{margin-bottom:2em}.quarto-dashboard .sidebar 
.shiny-input-container{margin-bottom:1em}.quarto-dashboard .sidebar .shiny-options-group{margin-top:0}.quarto-dashboard .sidebar .control-label{margin-bottom:.3em}.quarto-dashboard .card .card-body .quarto-layout-row{align-items:stretch}.quarto-dashboard .toolbar{font-size:.9em;display:flex;flex-direction:row;border-top:solid 1px #bcbfc0;padding:1em;flex-wrap:wrap;background-color:rgba(52,58,64,.25)}.quarto-dashboard .toolbar .cell-output-display{display:flex}.quarto-dashboard .toolbar .shiny-input-container{padding-bottom:.5em;margin-bottom:.5em;width:inherit}.quarto-dashboard .toolbar .shiny-input-container>.checkbox:first-child{margin-top:6px}.quarto-dashboard .toolbar>*:last-child{margin-right:0}.quarto-dashboard .toolbar>*>*{margin-right:1em;align-items:baseline}.quarto-dashboard .toolbar>*>*>a{text-decoration:none;margin-top:auto;margin-bottom:auto}.quarto-dashboard .toolbar .shiny-input-container{padding-bottom:0;margin-bottom:0}.quarto-dashboard .toolbar .shiny-input-container>*{flex-shrink:0;flex-grow:0}.quarto-dashboard .toolbar .form-group.shiny-input-container:not([role=group])>label{margin-bottom:0}.quarto-dashboard .toolbar .shiny-input-container.no-baseline{align-items:start;padding-top:6px}.quarto-dashboard .toolbar .shiny-input-container{display:flex;align-items:baseline}.quarto-dashboard .toolbar .shiny-input-container label{padding-right:.4em}.quarto-dashboard .toolbar .shiny-input-container .bslib-input-switch{margin-top:6px}.quarto-dashboard .toolbar input[type=text]{line-height:1;width:inherit}.quarto-dashboard .toolbar .input-daterange{width:inherit}.quarto-dashboard .toolbar .input-daterange input[type=text]{height:2.4em;width:10em}.quarto-dashboard .toolbar .input-daterange .input-group-addon{height:auto;padding:0;margin-left:-5px !important;margin-right:-5px}.quarto-dashboard .toolbar .input-daterange .input-group-addon .input-group-text{padding-top:0;padding-bottom:0;height:100%}.quarto-dashboard .toolbar span.irs.irs--shiny{width:10em}.quarto-dashboard .toolbar span.irs.irs--shiny .irs-line{top:9px}.quarto-dashboard .toolbar span.irs.irs--shiny .irs-min,.quarto-dashboard .toolbar span.irs.irs--shiny .irs-max,.quarto-dashboard .toolbar span.irs.irs--shiny .irs-from,.quarto-dashboard .toolbar span.irs.irs--shiny .irs-to,.quarto-dashboard .toolbar span.irs.irs--shiny .irs-single{top:20px}.quarto-dashboard .toolbar span.irs.irs--shiny .irs-bar{top:8px}.quarto-dashboard .toolbar span.irs.irs--shiny .irs-handle{top:0px}.quarto-dashboard .toolbar .shiny-input-checkboxgroup>label{margin-top:6px}.quarto-dashboard .toolbar .shiny-input-checkboxgroup>.shiny-options-group{margin-top:0;align-items:baseline}.quarto-dashboard .toolbar .shiny-input-radiogroup>label{margin-top:6px}.quarto-dashboard .toolbar .shiny-input-radiogroup>.shiny-options-group{align-items:baseline;margin-top:0}.quarto-dashboard .toolbar .shiny-input-radiogroup>.shiny-options-group>.radio{margin-right:.3em}.quarto-dashboard .toolbar .form-select{padding-top:.2em;padding-bottom:.2em}.quarto-dashboard .toolbar .shiny-input-select{min-width:6em}.quarto-dashboard .toolbar div.checkbox{margin-bottom:0px}.quarto-dashboard .toolbar>.checkbox:first-child{margin-top:6px}.quarto-dashboard .toolbar form{width:fit-content}.quarto-dashboard .toolbar form label{padding-top:.2em;padding-bottom:.2em;width:fit-content}.quarto-dashboard .toolbar form input[type=date]{width:fit-content}.quarto-dashboard .toolbar form input[type=color]{width:3em}.quarto-dashboard .toolbar form button{padding:.4em}.quarto-dashboard .toolbar 
form select{width:fit-content}.quarto-dashboard .toolbar>*{font-size:.9em;flex-grow:0}.quarto-dashboard .toolbar .shiny-input-container label{margin-bottom:1px}.quarto-dashboard .toolbar-bottom{margin-top:1em;margin-bottom:0 !important;order:2}.quarto-dashboard .quarto-dashboard-content>.dashboard-toolbar-container>.toolbar-content>.tab-content>.tab-pane>*:not(.bslib-sidebar-layout){padding:1em}.quarto-dashboard .quarto-dashboard-content>.dashboard-toolbar-container>.toolbar-content>*:not(.tab-content){padding:1em}.quarto-dashboard .quarto-dashboard-content>.tab-content>.dashboard-page>.dashboard-toolbar-container>.toolbar-content,.quarto-dashboard .quarto-dashboard-content>.tab-content>.dashboard-page:not(.dashboard-sidebar-container)>*:not(.dashboard-toolbar-container){padding:1em}.quarto-dashboard .toolbar-content{padding:0}.quarto-dashboard .quarto-dashboard-content.quarto-dashboard-pages .tab-pane>.dashboard-toolbar-container .toolbar{border-radius:0;margin-bottom:0}.quarto-dashboard .dashboard-toolbar-container.toolbar-toplevel .toolbar{border-bottom:1px solid rgba(0,0,0,.175)}.quarto-dashboard .dashboard-toolbar-container.toolbar-toplevel .toolbar-bottom{margin-top:0}.quarto-dashboard .dashboard-toolbar-container:not(.toolbar-toplevel) .toolbar{margin-bottom:1em;border-top:none;border-radius:.25rem;border:1px solid rgba(0,0,0,.175)}.quarto-dashboard .vega-embed.has-actions details{width:1.7em;height:2em;position:absolute !important;top:0;right:0}.quarto-dashboard .dashboard-toolbar-container{padding:0}.quarto-dashboard .card .card-header p:last-child,.quarto-dashboard .card .card-footer p:last-child{margin-bottom:0}.quarto-dashboard .card .card-body>.h4:first-child{margin-top:0}.quarto-dashboard .card .card-body{z-index:1000}@media(max-width: 767.98px){.quarto-dashboard .card .card-body .itables div.dataTables_wrapper div.dataTables_length,.quarto-dashboard .card .card-body .itables div.dataTables_wrapper div.dataTables_info,.quarto-dashboard .card .card-body .itables div.dataTables_wrapper div.dataTables_paginate{text-align:initial}.quarto-dashboard .card .card-body .itables div.dataTables_wrapper div.dataTables_filter{text-align:right}.quarto-dashboard .card .card-body .itables div.dataTables_wrapper div.dataTables_paginate ul.pagination{justify-content:initial}}.quarto-dashboard .card .card-body .itables .dataTables_wrapper{display:flex;flex-wrap:wrap;justify-content:space-between;align-items:center;padding-top:0}.quarto-dashboard .card .card-body .itables .dataTables_wrapper table{flex-shrink:0}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dt-buttons{margin-bottom:.5em;margin-left:auto;width:fit-content;float:right}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dt-buttons.btn-group{background:#fff;border:none}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dt-buttons .btn-secondary{background-color:#fff;background-image:none;border:solid #dee2e6 1px;padding:.2em .7em}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dt-buttons .btn span{font-size:.8em;color:#343a40}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_info{margin-left:.5em;margin-bottom:.5em;padding-top:0}@media(min-width: 768px){.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_info{font-size:.875em}}@media(max-width: 767.98px){.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_info{font-size:.8em}}.quarto-dashboard .card .card-body .itables .dataTables_wrapper 
.dataTables_filter{margin-bottom:.5em;font-size:.875em}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_filter input[type=search]{padding:1px 5px 1px 5px;font-size:.875em}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_length{flex-basis:1 1 50%;margin-bottom:.5em;font-size:.875em}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_length select{padding:.4em 3em .4em .5em;font-size:.875em;margin-left:.2em;margin-right:.2em}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_paginate{flex-shrink:0}@media(min-width: 768px){.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_paginate{margin-left:auto}}.quarto-dashboard .card .card-body .itables .dataTables_wrapper .dataTables_paginate ul.pagination .paginate_button .page-link{font-size:.8em}.quarto-dashboard .card .card-footer{font-size:.9em}.quarto-dashboard .card .card-toolbar{display:flex;flex-grow:1;flex-direction:row;width:100%;flex-wrap:wrap}.quarto-dashboard .card .card-toolbar>*{font-size:.8em;flex-grow:0}.quarto-dashboard .card .card-toolbar>.card-title{font-size:1em;flex-grow:1;align-self:flex-start;margin-top:.1em}.quarto-dashboard .card .card-toolbar .cell-output-display{display:flex}.quarto-dashboard .card .card-toolbar .shiny-input-container{padding-bottom:.5em;margin-bottom:.5em;width:inherit}.quarto-dashboard .card .card-toolbar .shiny-input-container>.checkbox:first-child{margin-top:6px}.quarto-dashboard .card .card-toolbar>*:last-child{margin-right:0}.quarto-dashboard .card .card-toolbar>*>*{margin-right:1em;align-items:baseline}.quarto-dashboard .card .card-toolbar>*>*>a{text-decoration:none;margin-top:auto;margin-bottom:auto}.quarto-dashboard .card .card-toolbar form{width:fit-content}.quarto-dashboard .card .card-toolbar form label{padding-top:.2em;padding-bottom:.2em;width:fit-content}.quarto-dashboard .card .card-toolbar form input[type=date]{width:fit-content}.quarto-dashboard .card .card-toolbar form input[type=color]{width:3em}.quarto-dashboard .card .card-toolbar form button{padding:.4em}.quarto-dashboard .card .card-toolbar form select{width:fit-content}.quarto-dashboard .card .card-toolbar .cell-output-display{display:flex}.quarto-dashboard .card .card-toolbar .shiny-input-container{padding-bottom:.5em;margin-bottom:.5em;width:inherit}.quarto-dashboard .card .card-toolbar .shiny-input-container>.checkbox:first-child{margin-top:6px}.quarto-dashboard .card .card-toolbar>*:last-child{margin-right:0}.quarto-dashboard .card .card-toolbar>*>*{margin-right:1em;align-items:baseline}.quarto-dashboard .card .card-toolbar>*>*>a{text-decoration:none;margin-top:auto;margin-bottom:auto}.quarto-dashboard .card .card-toolbar .shiny-input-container{padding-bottom:0;margin-bottom:0}.quarto-dashboard .card .card-toolbar .shiny-input-container>*{flex-shrink:0;flex-grow:0}.quarto-dashboard .card .card-toolbar .form-group.shiny-input-container:not([role=group])>label{margin-bottom:0}.quarto-dashboard .card .card-toolbar .shiny-input-container.no-baseline{align-items:start;padding-top:6px}.quarto-dashboard .card .card-toolbar .shiny-input-container{display:flex;align-items:baseline}.quarto-dashboard .card .card-toolbar .shiny-input-container label{padding-right:.4em}.quarto-dashboard .card .card-toolbar .shiny-input-container .bslib-input-switch{margin-top:6px}.quarto-dashboard .card .card-toolbar input[type=text]{line-height:1;width:inherit}.quarto-dashboard .card .card-toolbar 
.input-daterange{width:inherit}.quarto-dashboard .card .card-toolbar .input-daterange input[type=text]{height:2.4em;width:10em}.quarto-dashboard .card .card-toolbar .input-daterange .input-group-addon{height:auto;padding:0;margin-left:-5px !important;margin-right:-5px}.quarto-dashboard .card .card-toolbar .input-daterange .input-group-addon .input-group-text{padding-top:0;padding-bottom:0;height:100%}.quarto-dashboard .card .card-toolbar span.irs.irs--shiny{width:10em}.quarto-dashboard .card .card-toolbar span.irs.irs--shiny .irs-line{top:9px}.quarto-dashboard .card .card-toolbar span.irs.irs--shiny .irs-min,.quarto-dashboard .card .card-toolbar span.irs.irs--shiny .irs-max,.quarto-dashboard .card .card-toolbar span.irs.irs--shiny .irs-from,.quarto-dashboard .card .card-toolbar span.irs.irs--shiny .irs-to,.quarto-dashboard .card .card-toolbar span.irs.irs--shiny .irs-single{top:20px}.quarto-dashboard .card .card-toolbar span.irs.irs--shiny .irs-bar{top:8px}.quarto-dashboard .card .card-toolbar span.irs.irs--shiny .irs-handle{top:0px}.quarto-dashboard .card .card-toolbar .shiny-input-checkboxgroup>label{margin-top:6px}.quarto-dashboard .card .card-toolbar .shiny-input-checkboxgroup>.shiny-options-group{margin-top:0;align-items:baseline}.quarto-dashboard .card .card-toolbar .shiny-input-radiogroup>label{margin-top:6px}.quarto-dashboard .card .card-toolbar .shiny-input-radiogroup>.shiny-options-group{align-items:baseline;margin-top:0}.quarto-dashboard .card .card-toolbar .shiny-input-radiogroup>.shiny-options-group>.radio{margin-right:.3em}.quarto-dashboard .card .card-toolbar .form-select{padding-top:.2em;padding-bottom:.2em}.quarto-dashboard .card .card-toolbar .shiny-input-select{min-width:6em}.quarto-dashboard .card .card-toolbar div.checkbox{margin-bottom:0px}.quarto-dashboard .card .card-toolbar>.checkbox:first-child{margin-top:6px}.quarto-dashboard .card-body>table>thead{border-top:none}.quarto-dashboard .card-body>.table>:not(caption)>*>*{background-color:#fff}.quarto-listing{padding-bottom:1em}.listing-pagination{padding-top:.5em}ul.pagination{float:right;padding-left:8px;padding-top:.5em}ul.pagination li{padding-right:.75em}ul.pagination li.disabled a,ul.pagination li.active a{color:#fff;text-decoration:none}ul.pagination li:last-of-type{padding-right:0}.listing-actions-group{display:flex}.quarto-listing-filter{margin-bottom:1em;width:200px;margin-left:auto}.quarto-listing-sort{margin-bottom:1em;margin-right:auto;width:auto}.quarto-listing-sort .input-group-text{font-size:.8em}.input-group-text{border-right:none}.quarto-listing-sort select.form-select{font-size:.8em}.listing-no-matching{text-align:center;padding-top:2em;padding-bottom:3em;font-size:1em}#quarto-margin-sidebar .quarto-listing-category{padding-top:0;font-size:1rem}#quarto-margin-sidebar .quarto-listing-category-title{cursor:pointer;font-weight:600;font-size:1rem}.quarto-listing-category .category{cursor:pointer}.quarto-listing-category .category.active{font-weight:600}.quarto-listing-category.category-cloud{display:flex;flex-wrap:wrap;align-items:baseline}.quarto-listing-category.category-cloud .category{padding-right:5px}.quarto-listing-category.category-cloud .category-cloud-1{font-size:.75em}.quarto-listing-category.category-cloud .category-cloud-2{font-size:.95em}.quarto-listing-category.category-cloud .category-cloud-3{font-size:1.15em}.quarto-listing-category.category-cloud .category-cloud-4{font-size:1.35em}.quarto-listing-category.category-cloud 
.category-cloud-5{font-size:1.55em}.quarto-listing-category.category-cloud .category-cloud-6{font-size:1.75em}.quarto-listing-category.category-cloud .category-cloud-7{font-size:1.95em}.quarto-listing-category.category-cloud .category-cloud-8{font-size:2.15em}.quarto-listing-category.category-cloud .category-cloud-9{font-size:2.35em}.quarto-listing-category.category-cloud .category-cloud-10{font-size:2.55em}.quarto-listing-cols-1{grid-template-columns:repeat(1, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-1{grid-template-columns:repeat(1, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-1{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-2{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-2{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-2{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-3{grid-template-columns:repeat(3, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-3{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-3{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-4{grid-template-columns:repeat(4, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-4{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-4{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-5{grid-template-columns:repeat(5, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-5{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-5{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-6{grid-template-columns:repeat(6, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-6{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-6{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-7{grid-template-columns:repeat(7, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-7{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-7{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-8{grid-template-columns:repeat(8, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-8{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-8{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-9{grid-template-columns:repeat(9, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-9{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-9{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-10{grid-template-columns:repeat(10, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-10{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-10{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-11{grid-template-columns:repeat(11, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-11{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 
575.98px){.quarto-listing-cols-11{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-cols-12{grid-template-columns:repeat(12, minmax(0, 1fr));gap:1.5em}@media(max-width: 767.98px){.quarto-listing-cols-12{grid-template-columns:repeat(2, minmax(0, 1fr));gap:1.5em}}@media(max-width: 575.98px){.quarto-listing-cols-12{grid-template-columns:minmax(0, 1fr);gap:1.5em}}.quarto-listing-grid{gap:1.5em}.quarto-grid-item.borderless{border:none}.quarto-grid-item.borderless .listing-categories .listing-category:last-of-type,.quarto-grid-item.borderless .listing-categories .listing-category:first-of-type{padding-left:0}.quarto-grid-item.borderless .listing-categories .listing-category{border:0}.quarto-grid-link{text-decoration:none;color:inherit}.quarto-grid-link:hover{text-decoration:none;color:inherit}.quarto-grid-item h5.title,.quarto-grid-item .title.h5{margin-top:0;margin-bottom:0}.quarto-grid-item .card-footer{display:flex;justify-content:space-between;font-size:.8em}.quarto-grid-item .card-footer p{margin-bottom:0}.quarto-grid-item p.card-img-top{margin-bottom:0}.quarto-grid-item p.card-img-top>img{object-fit:cover}.quarto-grid-item .card-other-values{margin-top:.5em;font-size:.8em}.quarto-grid-item .card-other-values tr{margin-bottom:.5em}.quarto-grid-item .card-other-values tr>td:first-of-type{font-weight:600;padding-right:1em;padding-left:1em;vertical-align:top}.quarto-grid-item div.post-contents{display:flex;flex-direction:column;text-decoration:none;height:100%}.quarto-grid-item .listing-item-img-placeholder{background-color:rgba(52,58,64,.25);flex-shrink:0}.quarto-grid-item .card-attribution{padding-top:1em;display:flex;gap:1em;text-transform:uppercase;color:#6c757d;font-weight:500;flex-grow:10;align-items:flex-end}.quarto-grid-item .description{padding-bottom:1em}.quarto-grid-item .card-attribution .date{align-self:flex-end}.quarto-grid-item .card-attribution.justify{justify-content:space-between}.quarto-grid-item .card-attribution.start{justify-content:flex-start}.quarto-grid-item .card-attribution.end{justify-content:flex-end}.quarto-grid-item .card-title{margin-bottom:.1em}.quarto-grid-item .card-subtitle{padding-top:.25em}.quarto-grid-item .card-text{font-size:.9em}.quarto-grid-item .listing-reading-time{padding-bottom:.25em}.quarto-grid-item .card-text-small{font-size:.8em}.quarto-grid-item .card-subtitle.subtitle{font-size:.9em;font-weight:600;padding-bottom:.5em}.quarto-grid-item .listing-categories{display:flex;flex-wrap:wrap;padding-bottom:5px}.quarto-grid-item .listing-categories .listing-category{color:#6c757d;border:solid 1px #dee2e6;border-radius:.25rem;text-transform:uppercase;font-size:.65em;padding-left:.5em;padding-right:.5em;padding-top:.15em;padding-bottom:.15em;cursor:pointer;margin-right:4px;margin-bottom:4px}.quarto-grid-item.card-right{text-align:right}.quarto-grid-item.card-right .listing-categories{justify-content:flex-end}.quarto-grid-item.card-left{text-align:left}.quarto-grid-item.card-center{text-align:center}.quarto-grid-item.card-center .listing-description{text-align:justify}.quarto-grid-item.card-center .listing-categories{justify-content:center}table.quarto-listing-table td.image{padding:0px}table.quarto-listing-table td.image img{width:100%;max-width:50px;object-fit:contain}table.quarto-listing-table a{text-decoration:none;word-break:keep-all}table.quarto-listing-table th a{color:inherit}table.quarto-listing-table th 
a.asc:after{margin-bottom:-2px;margin-left:5px;display:inline-block;height:1rem;width:1rem;background-repeat:no-repeat;background-size:1rem 1rem;background-image:url('data:image/svg+xml,');content:""}table.quarto-listing-table th a.desc:after{margin-bottom:-2px;margin-left:5px;display:inline-block;height:1rem;width:1rem;background-repeat:no-repeat;background-size:1rem 1rem;background-image:url('data:image/svg+xml,');content:""}table.quarto-listing-table.table-hover td{cursor:pointer}.quarto-post.image-left{flex-direction:row}.quarto-post.image-right{flex-direction:row-reverse}@media(max-width: 767.98px){.quarto-post.image-right,.quarto-post.image-left{gap:0em;flex-direction:column}.quarto-post .metadata{padding-bottom:1em;order:2}.quarto-post .body{order:1}.quarto-post .thumbnail{order:3}}.list.quarto-listing-default div:last-of-type{border-bottom:none}@media(min-width: 992px){.quarto-listing-container-default{margin-right:2em}}div.quarto-post{display:flex;gap:2em;margin-bottom:1.5em;border-bottom:1px solid #dee2e6}@media(max-width: 767.98px){div.quarto-post{padding-bottom:1em}}div.quarto-post .metadata{flex-basis:20%;flex-grow:0;margin-top:.2em;flex-shrink:10}div.quarto-post .thumbnail{flex-basis:30%;flex-grow:0;flex-shrink:0}div.quarto-post .thumbnail img{margin-top:.4em;width:100%;object-fit:cover}div.quarto-post .body{flex-basis:45%;flex-grow:1;flex-shrink:0}div.quarto-post .body h3.listing-title,div.quarto-post .body .listing-title.h3{margin-top:0px;margin-bottom:0px;border-bottom:none}div.quarto-post .body .listing-subtitle{font-size:.875em;margin-bottom:.5em;margin-top:.2em}div.quarto-post .body .description{font-size:.9em}div.quarto-post .body pre code{white-space:pre-wrap}div.quarto-post a{color:#343a40;text-decoration:none}div.quarto-post .metadata{display:flex;flex-direction:column;font-size:.8em;font-family:"Source Sans Pro",-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";flex-basis:33%}div.quarto-post .listing-categories{display:flex;flex-wrap:wrap;padding-bottom:5px}div.quarto-post .listing-categories .listing-category{color:#6c757d;border:solid 1px #dee2e6;border-radius:.25rem;text-transform:uppercase;font-size:.65em;padding-left:.5em;padding-right:.5em;padding-top:.15em;padding-bottom:.15em;cursor:pointer;margin-right:4px;margin-bottom:4px}div.quarto-post .listing-description{margin-bottom:.5em}div.quarto-about-jolla{display:flex !important;flex-direction:column;align-items:center;margin-top:10%;padding-bottom:1em}div.quarto-about-jolla .about-image{object-fit:cover;margin-left:auto;margin-right:auto;margin-bottom:1.5em}div.quarto-about-jolla img.round{border-radius:50%}div.quarto-about-jolla img.rounded{border-radius:10px}div.quarto-about-jolla .quarto-title h1.title,div.quarto-about-jolla .quarto-title .title.h1{text-align:center}div.quarto-about-jolla .quarto-title .description{text-align:center}div.quarto-about-jolla h2,div.quarto-about-jolla .h2{border-bottom:none}div.quarto-about-jolla .about-sep{width:60%}div.quarto-about-jolla main{text-align:center}div.quarto-about-jolla .about-links{display:flex}@media(min-width: 992px){div.quarto-about-jolla .about-links{flex-direction:row;column-gap:.8em;row-gap:15px;flex-wrap:wrap}}@media(max-width: 991.98px){div.quarto-about-jolla .about-links{flex-direction:column;row-gap:1em;width:100%;padding-bottom:1.5em}}div.quarto-about-jolla .about-link{color:#626d78;text-decoration:none;border:solid 1px}@media(min-width: 
992px){div.quarto-about-jolla .about-link{font-size:.8em;padding:.25em .5em;border-radius:4px}}@media(max-width: 991.98px){div.quarto-about-jolla .about-link{font-size:1.1em;padding:.5em .5em;text-align:center;border-radius:6px}}div.quarto-about-jolla .about-link:hover{color:#2761e3}div.quarto-about-jolla .about-link i.bi{margin-right:.15em}div.quarto-about-solana{display:flex !important;flex-direction:column;padding-top:3em !important;padding-bottom:1em}div.quarto-about-solana .about-entity{display:flex !important;align-items:start;justify-content:space-between}@media(min-width: 992px){div.quarto-about-solana .about-entity{flex-direction:row}}@media(max-width: 991.98px){div.quarto-about-solana .about-entity{flex-direction:column-reverse;align-items:center;text-align:center}}div.quarto-about-solana .about-entity .entity-contents{display:flex;flex-direction:column}@media(max-width: 767.98px){div.quarto-about-solana .about-entity .entity-contents{width:100%}}div.quarto-about-solana .about-entity .about-image{object-fit:cover}@media(max-width: 991.98px){div.quarto-about-solana .about-entity .about-image{margin-bottom:1.5em}}div.quarto-about-solana .about-entity img.round{border-radius:50%}div.quarto-about-solana .about-entity img.rounded{border-radius:10px}div.quarto-about-solana .about-entity .about-links{display:flex;justify-content:left;padding-bottom:1.2em}@media(min-width: 992px){div.quarto-about-solana .about-entity .about-links{flex-direction:row;column-gap:.8em;row-gap:15px;flex-wrap:wrap}}@media(max-width: 991.98px){div.quarto-about-solana .about-entity .about-links{flex-direction:column;row-gap:1em;width:100%;padding-bottom:1.5em}}div.quarto-about-solana .about-entity .about-link{color:#626d78;text-decoration:none;border:solid 1px}@media(min-width: 992px){div.quarto-about-solana .about-entity .about-link{font-size:.8em;padding:.25em .5em;border-radius:4px}}@media(max-width: 991.98px){div.quarto-about-solana .about-entity .about-link{font-size:1.1em;padding:.5em .5em;text-align:center;border-radius:6px}}div.quarto-about-solana .about-entity .about-link:hover{color:#2761e3}div.quarto-about-solana .about-entity .about-link i.bi{margin-right:.15em}div.quarto-about-solana .about-contents{padding-right:1.5em;flex-basis:0;flex-grow:1}div.quarto-about-solana .about-contents main.content{margin-top:0}div.quarto-about-solana .about-contents h2,div.quarto-about-solana .about-contents .h2{border-bottom:none}div.quarto-about-trestles{display:flex !important;flex-direction:row;padding-top:3em !important;padding-bottom:1em}@media(max-width: 991.98px){div.quarto-about-trestles{flex-direction:column;padding-top:0em !important}}div.quarto-about-trestles .about-entity{display:flex !important;flex-direction:column;align-items:center;text-align:center;padding-right:1em}@media(min-width: 992px){div.quarto-about-trestles .about-entity{flex:0 0 42%}}div.quarto-about-trestles .about-entity .about-image{object-fit:cover;margin-bottom:1.5em}div.quarto-about-trestles .about-entity img.round{border-radius:50%}div.quarto-about-trestles .about-entity img.rounded{border-radius:10px}div.quarto-about-trestles .about-entity .about-links{display:flex;justify-content:center}@media(min-width: 992px){div.quarto-about-trestles .about-entity .about-links{flex-direction:row;column-gap:.8em;row-gap:15px;flex-wrap:wrap}}@media(max-width: 991.98px){div.quarto-about-trestles .about-entity .about-links{flex-direction:column;row-gap:1em;width:100%;padding-bottom:1.5em}}div.quarto-about-trestles .about-entity 
.about-link{color:#626d78;text-decoration:none;border:solid 1px}@media(min-width: 992px){div.quarto-about-trestles .about-entity .about-link{font-size:.8em;padding:.25em .5em;border-radius:4px}}@media(max-width: 991.98px){div.quarto-about-trestles .about-entity .about-link{font-size:1.1em;padding:.5em .5em;text-align:center;border-radius:6px}}div.quarto-about-trestles .about-entity .about-link:hover{color:#2761e3}div.quarto-about-trestles .about-entity .about-link i.bi{margin-right:.15em}div.quarto-about-trestles .about-contents{flex-basis:0;flex-grow:1}div.quarto-about-trestles .about-contents h2,div.quarto-about-trestles .about-contents .h2{border-bottom:none}@media(min-width: 992px){div.quarto-about-trestles .about-contents{border-left:solid 1px #dee2e6;padding-left:1.5em}}div.quarto-about-trestles .about-contents main.content{margin-top:0}div.quarto-about-marquee{padding-bottom:1em}div.quarto-about-marquee .about-contents{display:flex;flex-direction:column}div.quarto-about-marquee .about-image{max-height:550px;margin-bottom:1.5em;object-fit:cover}div.quarto-about-marquee img.round{border-radius:50%}div.quarto-about-marquee img.rounded{border-radius:10px}div.quarto-about-marquee h2,div.quarto-about-marquee .h2{border-bottom:none}div.quarto-about-marquee .about-links{display:flex;justify-content:center;padding-top:1.5em}@media(min-width: 992px){div.quarto-about-marquee .about-links{flex-direction:row;column-gap:.8em;row-gap:15px;flex-wrap:wrap}}@media(max-width: 991.98px){div.quarto-about-marquee .about-links{flex-direction:column;row-gap:1em;width:100%;padding-bottom:1.5em}}div.quarto-about-marquee .about-link{color:#626d78;text-decoration:none;border:solid 1px}@media(min-width: 992px){div.quarto-about-marquee .about-link{font-size:.8em;padding:.25em .5em;border-radius:4px}}@media(max-width: 991.98px){div.quarto-about-marquee .about-link{font-size:1.1em;padding:.5em .5em;text-align:center;border-radius:6px}}div.quarto-about-marquee .about-link:hover{color:#2761e3}div.quarto-about-marquee .about-link i.bi{margin-right:.15em}@media(min-width: 992px){div.quarto-about-marquee .about-link{border:none}}div.quarto-about-broadside{display:flex;flex-direction:column;padding-bottom:1em}div.quarto-about-broadside .about-main{display:flex !important;padding-top:0 !important}@media(min-width: 992px){div.quarto-about-broadside .about-main{flex-direction:row;align-items:flex-start}}@media(max-width: 991.98px){div.quarto-about-broadside .about-main{flex-direction:column}}@media(max-width: 991.98px){div.quarto-about-broadside .about-main .about-entity{flex-shrink:0;width:100%;height:450px;margin-bottom:1.5em;background-size:cover;background-repeat:no-repeat}}@media(min-width: 992px){div.quarto-about-broadside .about-main .about-entity{flex:0 10 50%;margin-right:1.5em;width:100%;height:100%;background-size:100%;background-repeat:no-repeat}}div.quarto-about-broadside .about-main .about-contents{padding-top:14px;flex:0 0 50%}div.quarto-about-broadside h2,div.quarto-about-broadside .h2{border-bottom:none}div.quarto-about-broadside .about-sep{margin-top:1.5em;width:60%;align-self:center}div.quarto-about-broadside .about-links{display:flex;justify-content:center;column-gap:20px;padding-top:1.5em}@media(min-width: 992px){div.quarto-about-broadside .about-links{flex-direction:row;column-gap:.8em;row-gap:15px;flex-wrap:wrap}}@media(max-width: 991.98px){div.quarto-about-broadside .about-links{flex-direction:column;row-gap:1em;width:100%;padding-bottom:1.5em}}div.quarto-about-broadside 
.about-link{color:#626d78;text-decoration:none;border:solid 1px}@media(min-width: 992px){div.quarto-about-broadside .about-link{font-size:.8em;padding:.25em .5em;border-radius:4px}}@media(max-width: 991.98px){div.quarto-about-broadside .about-link{font-size:1.1em;padding:.5em .5em;text-align:center;border-radius:6px}}div.quarto-about-broadside .about-link:hover{color:#2761e3}div.quarto-about-broadside .about-link i.bi{margin-right:.15em}@media(min-width: 992px){div.quarto-about-broadside .about-link{border:none}}.tippy-box[data-theme~=quarto]{background-color:#fff;border:solid 1px #dee2e6;border-radius:.25rem;color:#343a40;font-size:.875rem}.tippy-box[data-theme~=quarto]>.tippy-backdrop{background-color:#fff}.tippy-box[data-theme~=quarto]>.tippy-arrow:after,.tippy-box[data-theme~=quarto]>.tippy-svg-arrow:after{content:"";position:absolute;z-index:-1}.tippy-box[data-theme~=quarto]>.tippy-arrow:after{border-color:rgba(0,0,0,0);border-style:solid}.tippy-box[data-placement^=top]>.tippy-arrow:before{bottom:-6px}.tippy-box[data-placement^=bottom]>.tippy-arrow:before{top:-6px}.tippy-box[data-placement^=right]>.tippy-arrow:before{left:-6px}.tippy-box[data-placement^=left]>.tippy-arrow:before{right:-6px}.tippy-box[data-theme~=quarto][data-placement^=top]>.tippy-arrow:before{border-top-color:#fff}.tippy-box[data-theme~=quarto][data-placement^=top]>.tippy-arrow:after{border-top-color:#dee2e6;border-width:7px 7px 0;top:17px;left:1px}.tippy-box[data-theme~=quarto][data-placement^=top]>.tippy-svg-arrow>svg{top:16px}.tippy-box[data-theme~=quarto][data-placement^=top]>.tippy-svg-arrow:after{top:17px}.tippy-box[data-theme~=quarto][data-placement^=bottom]>.tippy-arrow:before{border-bottom-color:#fff;bottom:16px}.tippy-box[data-theme~=quarto][data-placement^=bottom]>.tippy-arrow:after{border-bottom-color:#dee2e6;border-width:0 7px 7px;bottom:17px;left:1px}.tippy-box[data-theme~=quarto][data-placement^=bottom]>.tippy-svg-arrow>svg{bottom:15px}.tippy-box[data-theme~=quarto][data-placement^=bottom]>.tippy-svg-arrow:after{bottom:17px}.tippy-box[data-theme~=quarto][data-placement^=left]>.tippy-arrow:before{border-left-color:#fff}.tippy-box[data-theme~=quarto][data-placement^=left]>.tippy-arrow:after{border-left-color:#dee2e6;border-width:7px 0 7px 7px;left:17px;top:1px}.tippy-box[data-theme~=quarto][data-placement^=left]>.tippy-svg-arrow>svg{left:11px}.tippy-box[data-theme~=quarto][data-placement^=left]>.tippy-svg-arrow:after{left:12px}.tippy-box[data-theme~=quarto][data-placement^=right]>.tippy-arrow:before{border-right-color:#fff;right:16px}.tippy-box[data-theme~=quarto][data-placement^=right]>.tippy-arrow:after{border-width:7px 7px 7px 0;right:17px;top:1px;border-right-color:#dee2e6}.tippy-box[data-theme~=quarto][data-placement^=right]>.tippy-svg-arrow>svg{right:11px}.tippy-box[data-theme~=quarto][data-placement^=right]>.tippy-svg-arrow:after{right:12px}.tippy-box[data-theme~=quarto]>.tippy-svg-arrow{fill:#343a40}.tippy-box[data-theme~=quarto]>.tippy-svg-arrow:after{background-image:url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTYiIGhlaWdodD0iNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj48cGF0aCBkPSJNMCA2czEuNzk2LS4wMTMgNC42Ny0zLjYxNUM1Ljg1MS45IDYuOTMuMDA2IDggMGMxLjA3LS4wMDYgMi4xNDguODg3IDMuMzQzIDIuMzg1QzE0LjIzMyA2LjAwNSAxNiA2IDE2IDZIMHoiIGZpbGw9InJnYmEoMCwgOCwgMTYsIDAuMikiLz48L3N2Zz4=);background-size:16px 6px;width:16px;height:6px}.top-right{position:absolute;top:1em;right:1em}.visually-hidden{border:0;clip:rect(0 0 0 
0);height:auto;margin:0;overflow:hidden;padding:0;position:absolute;width:1px;white-space:nowrap}.hidden{display:none !important}.zindex-bottom{z-index:-1 !important}figure.figure{display:block}.quarto-layout-panel{margin-bottom:1em}.quarto-layout-panel>figure{width:100%}.quarto-layout-panel>figure>figcaption,.quarto-layout-panel>.panel-caption{margin-top:10pt}.quarto-layout-panel>.table-caption{margin-top:0px}.table-caption p{margin-bottom:.5em}.quarto-layout-row{display:flex;flex-direction:row;align-items:flex-start}.quarto-layout-valign-top{align-items:flex-start}.quarto-layout-valign-bottom{align-items:flex-end}.quarto-layout-valign-center{align-items:center}.quarto-layout-cell{position:relative;margin-right:20px}.quarto-layout-cell:last-child{margin-right:0}.quarto-layout-cell figure,.quarto-layout-cell>p{margin:.2em}.quarto-layout-cell img{max-width:100%}.quarto-layout-cell .html-widget{width:100% !important}.quarto-layout-cell div figure p{margin:0}.quarto-layout-cell figure{display:block;margin-inline-start:0;margin-inline-end:0}.quarto-layout-cell table{display:inline-table}.quarto-layout-cell-subref figcaption,figure .quarto-layout-row figure figcaption{text-align:center;font-style:italic}.quarto-figure{position:relative;margin-bottom:1em}.quarto-figure>figure{width:100%;margin-bottom:0}.quarto-figure-left>figure>p,.quarto-figure-left>figure>div{text-align:left}.quarto-figure-center>figure>p,.quarto-figure-center>figure>div{text-align:center}.quarto-figure-right>figure>p,.quarto-figure-right>figure>div{text-align:right}.quarto-figure>figure>div.cell-annotation,.quarto-figure>figure>div code{text-align:left}figure>p:empty{display:none}figure>p:first-child{margin-top:0;margin-bottom:0}figure>figcaption.quarto-float-caption-bottom{margin-bottom:.5em}figure>figcaption.quarto-float-caption-top{margin-top:.5em}div[id^=tbl-]{position:relative}.quarto-figure>.anchorjs-link{position:absolute;top:.6em;right:.5em}div[id^=tbl-]>.anchorjs-link{position:absolute;top:.7em;right:.3em}.quarto-figure:hover>.anchorjs-link,div[id^=tbl-]:hover>.anchorjs-link,h2:hover>.anchorjs-link,.h2:hover>.anchorjs-link,h3:hover>.anchorjs-link,.h3:hover>.anchorjs-link,h4:hover>.anchorjs-link,.h4:hover>.anchorjs-link,h5:hover>.anchorjs-link,.h5:hover>.anchorjs-link,h6:hover>.anchorjs-link,.h6:hover>.anchorjs-link,.reveal-anchorjs-link>.anchorjs-link{opacity:1}#title-block-header{margin-block-end:1rem;position:relative;margin-top:-1px}#title-block-header .abstract{margin-block-start:1rem}#title-block-header .abstract .abstract-title{font-weight:600}#title-block-header a{text-decoration:none}#title-block-header .author,#title-block-header .date,#title-block-header .doi{margin-block-end:.2rem}#title-block-header .quarto-title-block>div{display:flex}#title-block-header .quarto-title-block>div>h1,#title-block-header .quarto-title-block>div>.h1{flex-grow:1}#title-block-header .quarto-title-block>div>button{flex-shrink:0;height:2.25rem;margin-top:0}@media(min-width: 992px){#title-block-header .quarto-title-block>div>button{margin-top:5px}}tr.header>th>p:last-of-type{margin-bottom:0px}table,table.table{margin-top:.5rem;margin-bottom:.5rem}caption,.table-caption{padding-top:.5rem;padding-bottom:.5rem;text-align:center}figure.quarto-float-tbl figcaption.quarto-float-caption-top{margin-top:.5rem;margin-bottom:.25rem;text-align:center}figure.quarto-float-tbl 
figcaption.quarto-float-caption-bottom{padding-top:.25rem;margin-bottom:.5rem;text-align:center}.utterances{max-width:none;margin-left:-8px}iframe{margin-bottom:1em}details{margin-bottom:1em}details[show]{margin-bottom:0}details>summary{color:#6c757d}details>summary>p:only-child{display:inline}pre.sourceCode,code.sourceCode{position:relative}p code:not(.sourceCode){white-space:pre-wrap}code{white-space:pre}@media print{code{white-space:pre-wrap}}pre>code{display:block}pre>code.sourceCode{white-space:pre}pre>code.sourceCode>span>a:first-child::before{text-decoration:none}pre.code-overflow-wrap>code.sourceCode{white-space:pre-wrap}pre.code-overflow-scroll>code.sourceCode{white-space:pre}code a:any-link{color:inherit;text-decoration:none}code a:hover{color:inherit;text-decoration:underline}ul.task-list{padding-left:1em}[data-tippy-root]{display:inline-block}.tippy-content .footnote-back{display:none}.footnote-back{margin-left:.2em}.tippy-content{overflow-x:auto}.quarto-embedded-source-code{display:none}.quarto-unresolved-ref{font-weight:600}.quarto-cover-image{max-width:35%;float:right;margin-left:30px}.cell-output-display .widget-subarea{margin-bottom:1em}.cell-output-display:not(.no-overflow-x),.knitsql-table:not(.no-overflow-x){overflow-x:auto}.panel-input{margin-bottom:1em}.panel-input>div,.panel-input>div>div{display:inline-block;vertical-align:top;padding-right:12px}.panel-input>p:last-child{margin-bottom:0}.layout-sidebar{margin-bottom:1em}.layout-sidebar .tab-content{border:none}.tab-content>.page-columns.active{display:grid}div.sourceCode>iframe{width:100%;height:300px;margin-bottom:-0.5em}a{text-underline-offset:3px}div.ansi-escaped-output{font-family:monospace;display:block}/*! +* +* ansi colors from IPython notebook's +* +* we also add `bright-[color]-` synonyms for the `-[color]-intense` classes since +* that seems to be what ansi_up emits +* 
+*/.ansi-black-fg{color:#3e424d}.ansi-black-bg{background-color:#3e424d}.ansi-black-intense-black,.ansi-bright-black-fg{color:#282c36}.ansi-black-intense-black,.ansi-bright-black-bg{background-color:#282c36}.ansi-red-fg{color:#e75c58}.ansi-red-bg{background-color:#e75c58}.ansi-red-intense-red,.ansi-bright-red-fg{color:#b22b31}.ansi-red-intense-red,.ansi-bright-red-bg{background-color:#b22b31}.ansi-green-fg{color:#00a250}.ansi-green-bg{background-color:#00a250}.ansi-green-intense-green,.ansi-bright-green-fg{color:#007427}.ansi-green-intense-green,.ansi-bright-green-bg{background-color:#007427}.ansi-yellow-fg{color:#ddb62b}.ansi-yellow-bg{background-color:#ddb62b}.ansi-yellow-intense-yellow,.ansi-bright-yellow-fg{color:#b27d12}.ansi-yellow-intense-yellow,.ansi-bright-yellow-bg{background-color:#b27d12}.ansi-blue-fg{color:#208ffb}.ansi-blue-bg{background-color:#208ffb}.ansi-blue-intense-blue,.ansi-bright-blue-fg{color:#0065ca}.ansi-blue-intense-blue,.ansi-bright-blue-bg{background-color:#0065ca}.ansi-magenta-fg{color:#d160c4}.ansi-magenta-bg{background-color:#d160c4}.ansi-magenta-intense-magenta,.ansi-bright-magenta-fg{color:#a03196}.ansi-magenta-intense-magenta,.ansi-bright-magenta-bg{background-color:#a03196}.ansi-cyan-fg{color:#60c6c8}.ansi-cyan-bg{background-color:#60c6c8}.ansi-cyan-intense-cyan,.ansi-bright-cyan-fg{color:#258f8f}.ansi-cyan-intense-cyan,.ansi-bright-cyan-bg{background-color:#258f8f}.ansi-white-fg{color:#c5c1b4}.ansi-white-bg{background-color:#c5c1b4}.ansi-white-intense-white,.ansi-bright-white-fg{color:#a1a6b2}.ansi-white-intense-white,.ansi-bright-white-bg{background-color:#a1a6b2}.ansi-default-inverse-fg{color:#fff}.ansi-default-inverse-bg{background-color:#000}.ansi-bold{font-weight:bold}.ansi-underline{text-decoration:underline}:root{--quarto-body-bg: #fff;--quarto-body-color: #343a40;--quarto-text-muted: #6c757d;--quarto-border-color: #dee2e6;--quarto-border-width: 1px;--quarto-border-radius: 0.25rem}table.gt_table{color:var(--quarto-body-color);font-size:1em;width:100%;background-color:rgba(0,0,0,0);border-top-width:inherit;border-bottom-width:inherit;border-color:var(--quarto-border-color)}table.gt_table th.gt_column_spanner_outer{color:var(--quarto-body-color);background-color:rgba(0,0,0,0);border-top-width:inherit;border-bottom-width:inherit;border-color:var(--quarto-border-color)}table.gt_table th.gt_col_heading{color:var(--quarto-body-color);font-weight:bold;background-color:rgba(0,0,0,0)}table.gt_table thead.gt_col_headings{border-bottom:1px solid currentColor;border-top-width:inherit;border-top-color:var(--quarto-border-color)}table.gt_table thead.gt_col_headings:not(:first-child){border-top-width:1px;border-top-color:var(--quarto-border-color)}table.gt_table td.gt_row{border-bottom-width:1px;border-bottom-color:var(--quarto-border-color);border-top-width:0px}table.gt_table tbody.gt_table_body{border-top-width:1px;border-bottom-width:1px;border-bottom-color:var(--quarto-border-color);border-top-color:currentColor}div.columns{display:initial;gap:initial}div.column{display:inline-block;overflow-x:initial;vertical-align:top;width:50%}.code-annotation-tip-content{word-wrap:break-word}.code-annotation-container-hidden{display:none !important}dl.code-annotation-container-grid{display:grid;grid-template-columns:min-content auto}dl.code-annotation-container-grid dt{grid-column:1}dl.code-annotation-container-grid dd{grid-column:2}pre.sourceCode.code-annotation-code{padding-right:0}code.sourceCode 
.code-annotation-anchor{z-index:100;position:relative;float:right;background-color:rgba(0,0,0,0)}input[type=checkbox]{margin-right:.5ch}:root{--mermaid-bg-color: #fff;--mermaid-edge-color: #343a40;--mermaid-node-fg-color: #343a40;--mermaid-fg-color: #343a40;--mermaid-fg-color--lighter: #4b545c;--mermaid-fg-color--lightest: #626d78;--mermaid-font-family: Source Sans Pro, -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Helvetica Neue, Arial, sans-serif, Apple Color Emoji, Segoe UI Emoji, Segoe UI Symbol;--mermaid-label-bg-color: #fff;--mermaid-label-fg-color: #2780e3;--mermaid-node-bg-color: rgba(39, 128, 227, 0.1);--mermaid-node-fg-color: #343a40}@media print{:root{font-size:11pt}#quarto-sidebar,#TOC,.nav-page{display:none}.page-columns .content{grid-column-start:page-start}.fixed-top{position:relative}.panel-caption,.figure-caption,figcaption{color:#666}}.code-copy-button{position:absolute;top:0;right:0;border:0;margin-top:5px;margin-right:5px;background-color:rgba(0,0,0,0);z-index:3}.code-copy-button:focus{outline:none}.code-copy-button-tooltip{font-size:.75em}pre.sourceCode:hover>.code-copy-button>.bi::before{display:inline-block;height:1rem;width:1rem;content:"";vertical-align:-0.125em;background-image:url('data:image/svg+xml,');background-repeat:no-repeat;background-size:1rem 1rem}pre.sourceCode:hover>.code-copy-button-checked>.bi::before{background-image:url('data:image/svg+xml,')}pre.sourceCode:hover>.code-copy-button:hover>.bi::before{background-image:url('data:image/svg+xml,')}pre.sourceCode:hover>.code-copy-button-checked:hover>.bi::before{background-image:url('data:image/svg+xml,')}main ol ol,main ul ul,main ol ul,main ul ol{margin-bottom:1em}ul>li:not(:has(>p))>ul,ol>li:not(:has(>p))>ul,ul>li:not(:has(>p))>ol,ol>li:not(:has(>p))>ol{margin-bottom:0}ul>li:not(:has(>p))>ul>li:has(>p),ol>li:not(:has(>p))>ul>li:has(>p),ul>li:not(:has(>p))>ol>li:has(>p),ol>li:not(:has(>p))>ol>li:has(>p){margin-top:1rem}body{margin:0}main.page-columns>header>h1.title,main.page-columns>header>.title.h1{margin-bottom:0}@media(min-width: 992px){body .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start page-start-inset] 35px [body-start-outset] 35px [body-start] 1.5em [body-content-start] minmax(500px, calc(850px - 3em)) [body-content-end] 1.5em [body-end] 35px [body-end-outset] minmax(75px, 145px) [page-end-inset] 35px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.fullcontent:not(.floating):not(.docked) .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start page-start-inset] 35px [body-start-outset] 35px [body-start] 1.5em [body-content-start] minmax(500px, calc(850px - 3em)) [body-content-end] 1.5em [body-end] 35px [body-end-outset] 35px [page-end-inset page-end] 5fr [screen-end-inset] 1.5em}body.slimcontent:not(.floating):not(.docked) .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start page-start-inset] 35px [body-start-outset] 35px [body-start] 1.5em [body-content-start] minmax(500px, calc(850px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(0px, 200px) [page-end-inset] 35px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.listing:not(.floating):not(.docked) .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start] minmax(50px, 100px) [page-start-inset] 50px [body-start-outset] 50px [body-start] 1.5em [body-content-start] 
minmax(500px, calc(850px - 3em)) [body-content-end] 3em [body-end] 50px [body-end-outset] minmax(0px, 250px) [page-end-inset] minmax(50px, 100px) [page-end] 1fr [screen-end-inset] 1.5em [screen-end]}body:not(.floating):not(.docked) .page-columns.toc-left{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start] 35px [page-start-inset] minmax(0px, 175px) [body-start-outset] 35px [body-start] 1.5em [body-content-start] minmax(450px, calc(800px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(0px, 200px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body:not(.floating):not(.docked) .page-columns.toc-left .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start] 35px [page-start-inset] minmax(0px, 175px) [body-start-outset] 35px [body-start] 1.5em [body-content-start] minmax(450px, calc(800px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(0px, 200px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.floating .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start] minmax(25px, 50px) [page-start-inset] minmax(50px, 150px) [body-start-outset] minmax(25px, 50px) [body-start] 1.5em [body-content-start] minmax(500px, calc(800px - 3em)) [body-content-end] 1.5em [body-end] minmax(25px, 50px) [body-end-outset] minmax(50px, 150px) [page-end-inset] minmax(25px, 50px) [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.docked .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start] minmax(50px, 100px) [page-start-inset] 50px [body-start-outset] 50px [body-start] 1.5em [body-content-start] minmax(500px, calc(1000px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(50px, 100px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.docked.fullcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start] minmax(50px, 100px) [page-start-inset] 50px [body-start-outset] 50px [body-start] 1.5em [body-content-start] minmax(500px, calc(1000px - 3em)) [body-content-end] 1.5em [body-end body-end-outset page-end-inset page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.floating.fullcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start] 50px [page-start-inset] minmax(50px, 150px) [body-start-outset] 50px [body-start] 1.5em [body-content-start] minmax(500px, calc(800px - 3em)) [body-content-end] 1.5em [body-end body-end-outset page-end-inset page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.docked.slimcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start] minmax(50px, 100px) [page-start-inset] 50px [body-start-outset] 50px [body-start] 1.5em [body-content-start] minmax(450px, calc(750px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(0px, 200px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.docked.listing .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start] minmax(50px, 100px) [page-start-inset] 50px [body-start-outset] 50px [body-start] 1.5em [body-content-start] minmax(500px, calc(1000px - 3em)) [body-content-end] 1.5em [body-end] 50px 
[body-end-outset] minmax(0px, 200px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.floating.slimcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start] 50px [page-start-inset] minmax(50px, 150px) [body-start-outset] 50px [body-start] 1.5em [body-content-start] minmax(450px, calc(750px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(50px, 150px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.floating.listing .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start] minmax(25px, 50px) [page-start-inset] minmax(50px, 150px) [body-start-outset] minmax(25px, 50px) [body-start] 1.5em [body-content-start] minmax(500px, calc(800px - 3em)) [body-content-end] 1.5em [body-end] minmax(25px, 50px) [body-end-outset] minmax(50px, 150px) [page-end-inset] minmax(25px, 50px) [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}}@media(max-width: 991.98px){body .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset] 5fr [body-start] 1.5em [body-content-start] minmax(500px, calc(800px - 3em)) [body-content-end] 1.5em [body-end] 35px [body-end-outset] minmax(75px, 145px) [page-end-inset] 35px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.fullcontent:not(.floating):not(.docked) .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset] 5fr [body-start] 1.5em [body-content-start] minmax(500px, calc(800px - 3em)) [body-content-end] 1.5em [body-end body-end-outset page-end-inset page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.slimcontent:not(.floating):not(.docked) .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset] 5fr [body-start] 1.5em [body-content-start] minmax(500px, calc(800px - 3em)) [body-content-end] 1.5em [body-end] 35px [body-end-outset] minmax(75px, 145px) [page-end-inset] 35px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.listing:not(.floating):not(.docked) .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset] 5fr [body-start] 1.5em [body-content-start] minmax(500px, calc(1250px - 3em)) [body-content-end body-end body-end-outset page-end-inset page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body:not(.floating):not(.docked) .page-columns.toc-left{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start] 35px [page-start-inset] minmax(0px, 145px) [body-start-outset] 35px [body-start] 1.5em [body-content-start] minmax(450px, calc(800px - 3em)) [body-content-end] 1.5em [body-end body-end-outset page-end-inset page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body:not(.floating):not(.docked) .page-columns.toc-left .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start] 35px [page-start-inset] minmax(0px, 145px) [body-start-outset] 35px [body-start] 1.5em [body-content-start] minmax(450px, calc(800px - 3em)) [body-content-end] 1.5em [body-end body-end-outset page-end-inset page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.floating .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em 
[screen-start-inset] 5fr [page-start page-start-inset body-start-outset body-start] 1.5em [body-content-start] minmax(500px, calc(750px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(75px, 150px) [page-end-inset] 25px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.docked .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset body-start body-content-start] minmax(500px, calc(750px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(25px, 50px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.docked.fullcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset body-start body-content-start] minmax(500px, calc(1000px - 3em)) [body-content-end] 1.5em [body-end body-end-outset page-end-inset page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.floating.fullcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start page-start-inset body-start-outset body-start] 1em [body-content-start] minmax(500px, calc(800px - 3em)) [body-content-end] 1.5em [body-end body-end-outset page-end-inset page-end] 4fr [screen-end-inset] 1.5em [screen-end]}body.docked.slimcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset body-start body-content-start] minmax(500px, calc(750px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(25px, 50px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.docked.listing .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset body-start body-content-start] minmax(500px, calc(750px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(25px, 50px) [page-end-inset] 50px [page-end] 5fr [screen-end-inset] 1.5em [screen-end]}body.floating.slimcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start page-start-inset body-start-outset body-start] 1em [body-content-start] minmax(500px, calc(750px - 3em)) [body-content-end] 1.5em [body-end] 35px [body-end-outset] minmax(75px, 145px) [page-end-inset] 35px [page-end] 4fr [screen-end-inset] 1.5em [screen-end]}body.floating.listing .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset] 5fr [page-start page-start-inset body-start-outset body-start] 1em [body-content-start] minmax(500px, calc(750px - 3em)) [body-content-end] 1.5em [body-end] 50px [body-end-outset] minmax(75px, 150px) [page-end-inset] 25px [page-end] 4fr [screen-end-inset] 1.5em [screen-end]}}@media(max-width: 767.98px){body .page-columns,body.fullcontent:not(.floating):not(.docked) .page-columns,body.slimcontent:not(.floating):not(.docked) .page-columns,body.docked .page-columns,body.docked.slimcontent .page-columns,body.docked.fullcontent .page-columns,body.floating .page-columns,body.floating.slimcontent .page-columns,body.floating.fullcontent .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset body-start body-content-start] minmax(0px, 1fr) [body-content-end body-end body-end-outset page-end-inset page-end 
screen-end-inset] 1.5em [screen-end]}body:not(.floating):not(.docked) .page-columns.toc-left{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset body-start body-content-start] minmax(0px, 1fr) [body-content-end body-end body-end-outset page-end-inset page-end screen-end-inset] 1.5em [screen-end]}body:not(.floating):not(.docked) .page-columns.toc-left .page-columns{display:grid;gap:0;grid-template-columns:[screen-start] 1.5em [screen-start-inset page-start page-start-inset body-start-outset body-start body-content-start] minmax(0px, 1fr) [body-content-end body-end body-end-outset page-end-inset page-end screen-end-inset] 1.5em [screen-end]}nav[role=doc-toc]{display:none}}body,.page-row-navigation{grid-template-rows:[page-top] max-content [contents-top] max-content [contents-bottom] max-content [page-bottom]}.page-rows-contents{grid-template-rows:[content-top] minmax(max-content, 1fr) [content-bottom] minmax(60px, max-content) [page-bottom]}.page-full{grid-column:screen-start/screen-end !important}.page-columns>*{grid-column:body-content-start/body-content-end}.page-columns.column-page>*{grid-column:page-start/page-end}.page-columns.column-page-left .page-columns.page-full>*,.page-columns.column-page-left>*{grid-column:page-start/body-content-end}.page-columns.column-page-right .page-columns.page-full>*,.page-columns.column-page-right>*{grid-column:body-content-start/page-end}.page-rows{grid-auto-rows:auto}.header{grid-column:screen-start/screen-end;grid-row:page-top/contents-top}#quarto-content{padding:0;grid-column:screen-start/screen-end;grid-row:contents-top/contents-bottom}body.floating .sidebar.sidebar-navigation{grid-column:page-start/body-start;grid-row:content-top/page-bottom}body.docked .sidebar.sidebar-navigation{grid-column:screen-start/body-start;grid-row:content-top/page-bottom}.sidebar.toc-left{grid-column:page-start/body-start;grid-row:content-top/page-bottom}.sidebar.margin-sidebar{grid-column:body-end/page-end;grid-row:content-top/page-bottom}.page-columns .content{grid-column:body-content-start/body-content-end;grid-row:content-top/content-bottom;align-content:flex-start}.page-columns .page-navigation{grid-column:body-content-start/body-content-end;grid-row:content-bottom/page-bottom}.page-columns .footer{grid-column:screen-start/screen-end;grid-row:contents-bottom/page-bottom}.page-columns .column-body{grid-column:body-content-start/body-content-end}.page-columns .column-body-fullbleed{grid-column:body-start/body-end}.page-columns .column-body-outset{grid-column:body-start-outset/body-end-outset;z-index:998;opacity:.999}.page-columns .column-body-outset table{background:#fff}.page-columns .column-body-outset-left{grid-column:body-start-outset/body-content-end;z-index:998;opacity:.999}.page-columns .column-body-outset-left table{background:#fff}.page-columns .column-body-outset-right{grid-column:body-content-start/body-end-outset;z-index:998;opacity:.999}.page-columns .column-body-outset-right table{background:#fff}.page-columns .column-page{grid-column:page-start/page-end;z-index:998;opacity:.999}.page-columns .column-page table{background:#fff}.page-columns .column-page-inset{grid-column:page-start-inset/page-end-inset;z-index:998;opacity:.999}.page-columns .column-page-inset table{background:#fff}.page-columns .column-page-inset-left{grid-column:page-start-inset/body-content-end;z-index:998;opacity:.999}.page-columns .column-page-inset-left table{background:#fff}.page-columns 
.column-page-inset-right{grid-column:body-content-start/page-end-inset;z-index:998;opacity:.999}.page-columns .column-page-inset-right figcaption table{background:#fff}.page-columns .column-page-left{grid-column:page-start/body-content-end;z-index:998;opacity:.999}.page-columns .column-page-left table{background:#fff}.page-columns .column-page-right{grid-column:body-content-start/page-end;z-index:998;opacity:.999}.page-columns .column-page-right figcaption table{background:#fff}#quarto-content.page-columns #quarto-margin-sidebar,#quarto-content.page-columns #quarto-sidebar{z-index:1}@media(max-width: 991.98px){#quarto-content.page-columns #quarto-margin-sidebar.collapse,#quarto-content.page-columns #quarto-sidebar.collapse,#quarto-content.page-columns #quarto-margin-sidebar.collapsing,#quarto-content.page-columns #quarto-sidebar.collapsing{z-index:1055}}#quarto-content.page-columns main.column-page,#quarto-content.page-columns main.column-page-right,#quarto-content.page-columns main.column-page-left{z-index:0}.page-columns .column-screen-inset{grid-column:screen-start-inset/screen-end-inset;z-index:998;opacity:.999}.page-columns .column-screen-inset table{background:#fff}.page-columns .column-screen-inset-left{grid-column:screen-start-inset/body-content-end;z-index:998;opacity:.999}.page-columns .column-screen-inset-left table{background:#fff}.page-columns .column-screen-inset-right{grid-column:body-content-start/screen-end-inset;z-index:998;opacity:.999}.page-columns .column-screen-inset-right table{background:#fff}.page-columns .column-screen{grid-column:screen-start/screen-end;z-index:998;opacity:.999}.page-columns .column-screen table{background:#fff}.page-columns .column-screen-left{grid-column:screen-start/body-content-end;z-index:998;opacity:.999}.page-columns .column-screen-left table{background:#fff}.page-columns .column-screen-right{grid-column:body-content-start/screen-end;z-index:998;opacity:.999}.page-columns .column-screen-right table{background:#fff}.page-columns .column-screen-inset-shaded{grid-column:screen-start/screen-end;padding:1em;background:#f8f9fa;z-index:998;opacity:.999;margin-bottom:1em}.zindex-content{z-index:998;opacity:.999}.zindex-modal{z-index:1055;opacity:.999}.zindex-over-content{z-index:999;opacity:.999}img.img-fluid.column-screen,img.img-fluid.column-screen-inset-shaded,img.img-fluid.column-screen-inset,img.img-fluid.column-screen-inset-left,img.img-fluid.column-screen-inset-right,img.img-fluid.column-screen-left,img.img-fluid.column-screen-right{width:100%}@media(min-width: 992px){.margin-caption,div.aside,aside:not(.footnotes):not(.sidebar),.column-margin{grid-column:body-end/page-end !important;z-index:998}.column-sidebar{grid-column:page-start/body-start !important;z-index:998}.column-leftmargin{grid-column:screen-start-inset/body-start !important;z-index:998}.no-row-height{height:1em;overflow:visible}}@media(max-width: 991.98px){.margin-caption,div.aside,aside:not(.footnotes):not(.sidebar),.column-margin{grid-column:body-end/page-end !important;z-index:998}.no-row-height{height:1em;overflow:visible}.page-columns.page-full{overflow:visible}.page-columns.toc-left .margin-caption,.page-columns.toc-left div.aside,.page-columns.toc-left aside:not(.footnotes):not(.sidebar),.page-columns.toc-left .column-margin{grid-column:body-content-start/body-content-end !important;z-index:998;opacity:.999}.page-columns.toc-left .no-row-height{height:initial;overflow:initial}}@media(max-width: 
767.98px){.margin-caption,div.aside,aside:not(.footnotes):not(.sidebar),.column-margin{grid-column:body-content-start/body-content-end !important;z-index:998;opacity:.999}.no-row-height{height:initial;overflow:initial}#quarto-margin-sidebar{display:none}#quarto-sidebar-toc-left{display:none}.hidden-sm{display:none}}.panel-grid{display:grid;grid-template-rows:repeat(1, 1fr);grid-template-columns:repeat(24, 1fr);gap:1em}.panel-grid .g-col-1{grid-column:auto/span 1}.panel-grid .g-col-2{grid-column:auto/span 2}.panel-grid .g-col-3{grid-column:auto/span 3}.panel-grid .g-col-4{grid-column:auto/span 4}.panel-grid .g-col-5{grid-column:auto/span 5}.panel-grid .g-col-6{grid-column:auto/span 6}.panel-grid .g-col-7{grid-column:auto/span 7}.panel-grid .g-col-8{grid-column:auto/span 8}.panel-grid .g-col-9{grid-column:auto/span 9}.panel-grid .g-col-10{grid-column:auto/span 10}.panel-grid .g-col-11{grid-column:auto/span 11}.panel-grid .g-col-12{grid-column:auto/span 12}.panel-grid .g-col-13{grid-column:auto/span 13}.panel-grid .g-col-14{grid-column:auto/span 14}.panel-grid .g-col-15{grid-column:auto/span 15}.panel-grid .g-col-16{grid-column:auto/span 16}.panel-grid .g-col-17{grid-column:auto/span 17}.panel-grid .g-col-18{grid-column:auto/span 18}.panel-grid .g-col-19{grid-column:auto/span 19}.panel-grid .g-col-20{grid-column:auto/span 20}.panel-grid .g-col-21{grid-column:auto/span 21}.panel-grid .g-col-22{grid-column:auto/span 22}.panel-grid .g-col-23{grid-column:auto/span 23}.panel-grid .g-col-24{grid-column:auto/span 24}.panel-grid .g-start-1{grid-column-start:1}.panel-grid .g-start-2{grid-column-start:2}.panel-grid .g-start-3{grid-column-start:3}.panel-grid .g-start-4{grid-column-start:4}.panel-grid .g-start-5{grid-column-start:5}.panel-grid .g-start-6{grid-column-start:6}.panel-grid .g-start-7{grid-column-start:7}.panel-grid .g-start-8{grid-column-start:8}.panel-grid .g-start-9{grid-column-start:9}.panel-grid .g-start-10{grid-column-start:10}.panel-grid .g-start-11{grid-column-start:11}.panel-grid .g-start-12{grid-column-start:12}.panel-grid .g-start-13{grid-column-start:13}.panel-grid .g-start-14{grid-column-start:14}.panel-grid .g-start-15{grid-column-start:15}.panel-grid .g-start-16{grid-column-start:16}.panel-grid .g-start-17{grid-column-start:17}.panel-grid .g-start-18{grid-column-start:18}.panel-grid .g-start-19{grid-column-start:19}.panel-grid .g-start-20{grid-column-start:20}.panel-grid .g-start-21{grid-column-start:21}.panel-grid .g-start-22{grid-column-start:22}.panel-grid .g-start-23{grid-column-start:23}@media(min-width: 576px){.panel-grid .g-col-sm-1{grid-column:auto/span 1}.panel-grid .g-col-sm-2{grid-column:auto/span 2}.panel-grid .g-col-sm-3{grid-column:auto/span 3}.panel-grid .g-col-sm-4{grid-column:auto/span 4}.panel-grid .g-col-sm-5{grid-column:auto/span 5}.panel-grid .g-col-sm-6{grid-column:auto/span 6}.panel-grid .g-col-sm-7{grid-column:auto/span 7}.panel-grid .g-col-sm-8{grid-column:auto/span 8}.panel-grid .g-col-sm-9{grid-column:auto/span 9}.panel-grid .g-col-sm-10{grid-column:auto/span 10}.panel-grid .g-col-sm-11{grid-column:auto/span 11}.panel-grid .g-col-sm-12{grid-column:auto/span 12}.panel-grid .g-col-sm-13{grid-column:auto/span 13}.panel-grid .g-col-sm-14{grid-column:auto/span 14}.panel-grid .g-col-sm-15{grid-column:auto/span 15}.panel-grid .g-col-sm-16{grid-column:auto/span 16}.panel-grid .g-col-sm-17{grid-column:auto/span 17}.panel-grid .g-col-sm-18{grid-column:auto/span 18}.panel-grid .g-col-sm-19{grid-column:auto/span 19}.panel-grid .g-col-sm-20{grid-column:auto/span 
20}.panel-grid .g-col-sm-21{grid-column:auto/span 21}.panel-grid .g-col-sm-22{grid-column:auto/span 22}.panel-grid .g-col-sm-23{grid-column:auto/span 23}.panel-grid .g-col-sm-24{grid-column:auto/span 24}.panel-grid .g-start-sm-1{grid-column-start:1}.panel-grid .g-start-sm-2{grid-column-start:2}.panel-grid .g-start-sm-3{grid-column-start:3}.panel-grid .g-start-sm-4{grid-column-start:4}.panel-grid .g-start-sm-5{grid-column-start:5}.panel-grid .g-start-sm-6{grid-column-start:6}.panel-grid .g-start-sm-7{grid-column-start:7}.panel-grid .g-start-sm-8{grid-column-start:8}.panel-grid .g-start-sm-9{grid-column-start:9}.panel-grid .g-start-sm-10{grid-column-start:10}.panel-grid .g-start-sm-11{grid-column-start:11}.panel-grid .g-start-sm-12{grid-column-start:12}.panel-grid .g-start-sm-13{grid-column-start:13}.panel-grid .g-start-sm-14{grid-column-start:14}.panel-grid .g-start-sm-15{grid-column-start:15}.panel-grid .g-start-sm-16{grid-column-start:16}.panel-grid .g-start-sm-17{grid-column-start:17}.panel-grid .g-start-sm-18{grid-column-start:18}.panel-grid .g-start-sm-19{grid-column-start:19}.panel-grid .g-start-sm-20{grid-column-start:20}.panel-grid .g-start-sm-21{grid-column-start:21}.panel-grid .g-start-sm-22{grid-column-start:22}.panel-grid .g-start-sm-23{grid-column-start:23}}@media(min-width: 768px){.panel-grid .g-col-md-1{grid-column:auto/span 1}.panel-grid .g-col-md-2{grid-column:auto/span 2}.panel-grid .g-col-md-3{grid-column:auto/span 3}.panel-grid .g-col-md-4{grid-column:auto/span 4}.panel-grid .g-col-md-5{grid-column:auto/span 5}.panel-grid .g-col-md-6{grid-column:auto/span 6}.panel-grid .g-col-md-7{grid-column:auto/span 7}.panel-grid .g-col-md-8{grid-column:auto/span 8}.panel-grid .g-col-md-9{grid-column:auto/span 9}.panel-grid .g-col-md-10{grid-column:auto/span 10}.panel-grid .g-col-md-11{grid-column:auto/span 11}.panel-grid .g-col-md-12{grid-column:auto/span 12}.panel-grid .g-col-md-13{grid-column:auto/span 13}.panel-grid .g-col-md-14{grid-column:auto/span 14}.panel-grid .g-col-md-15{grid-column:auto/span 15}.panel-grid .g-col-md-16{grid-column:auto/span 16}.panel-grid .g-col-md-17{grid-column:auto/span 17}.panel-grid .g-col-md-18{grid-column:auto/span 18}.panel-grid .g-col-md-19{grid-column:auto/span 19}.panel-grid .g-col-md-20{grid-column:auto/span 20}.panel-grid .g-col-md-21{grid-column:auto/span 21}.panel-grid .g-col-md-22{grid-column:auto/span 22}.panel-grid .g-col-md-23{grid-column:auto/span 23}.panel-grid .g-col-md-24{grid-column:auto/span 24}.panel-grid .g-start-md-1{grid-column-start:1}.panel-grid .g-start-md-2{grid-column-start:2}.panel-grid .g-start-md-3{grid-column-start:3}.panel-grid .g-start-md-4{grid-column-start:4}.panel-grid .g-start-md-5{grid-column-start:5}.panel-grid .g-start-md-6{grid-column-start:6}.panel-grid .g-start-md-7{grid-column-start:7}.panel-grid .g-start-md-8{grid-column-start:8}.panel-grid .g-start-md-9{grid-column-start:9}.panel-grid .g-start-md-10{grid-column-start:10}.panel-grid .g-start-md-11{grid-column-start:11}.panel-grid .g-start-md-12{grid-column-start:12}.panel-grid .g-start-md-13{grid-column-start:13}.panel-grid .g-start-md-14{grid-column-start:14}.panel-grid .g-start-md-15{grid-column-start:15}.panel-grid .g-start-md-16{grid-column-start:16}.panel-grid .g-start-md-17{grid-column-start:17}.panel-grid .g-start-md-18{grid-column-start:18}.panel-grid .g-start-md-19{grid-column-start:19}.panel-grid .g-start-md-20{grid-column-start:20}.panel-grid .g-start-md-21{grid-column-start:21}.panel-grid .g-start-md-22{grid-column-start:22}.panel-grid 
.g-start-md-23{grid-column-start:23}}@media(min-width: 992px){.panel-grid .g-col-lg-1{grid-column:auto/span 1}.panel-grid .g-col-lg-2{grid-column:auto/span 2}.panel-grid .g-col-lg-3{grid-column:auto/span 3}.panel-grid .g-col-lg-4{grid-column:auto/span 4}.panel-grid .g-col-lg-5{grid-column:auto/span 5}.panel-grid .g-col-lg-6{grid-column:auto/span 6}.panel-grid .g-col-lg-7{grid-column:auto/span 7}.panel-grid .g-col-lg-8{grid-column:auto/span 8}.panel-grid .g-col-lg-9{grid-column:auto/span 9}.panel-grid .g-col-lg-10{grid-column:auto/span 10}.panel-grid .g-col-lg-11{grid-column:auto/span 11}.panel-grid .g-col-lg-12{grid-column:auto/span 12}.panel-grid .g-col-lg-13{grid-column:auto/span 13}.panel-grid .g-col-lg-14{grid-column:auto/span 14}.panel-grid .g-col-lg-15{grid-column:auto/span 15}.panel-grid .g-col-lg-16{grid-column:auto/span 16}.panel-grid .g-col-lg-17{grid-column:auto/span 17}.panel-grid .g-col-lg-18{grid-column:auto/span 18}.panel-grid .g-col-lg-19{grid-column:auto/span 19}.panel-grid .g-col-lg-20{grid-column:auto/span 20}.panel-grid .g-col-lg-21{grid-column:auto/span 21}.panel-grid .g-col-lg-22{grid-column:auto/span 22}.panel-grid .g-col-lg-23{grid-column:auto/span 23}.panel-grid .g-col-lg-24{grid-column:auto/span 24}.panel-grid .g-start-lg-1{grid-column-start:1}.panel-grid .g-start-lg-2{grid-column-start:2}.panel-grid .g-start-lg-3{grid-column-start:3}.panel-grid .g-start-lg-4{grid-column-start:4}.panel-grid .g-start-lg-5{grid-column-start:5}.panel-grid .g-start-lg-6{grid-column-start:6}.panel-grid .g-start-lg-7{grid-column-start:7}.panel-grid .g-start-lg-8{grid-column-start:8}.panel-grid .g-start-lg-9{grid-column-start:9}.panel-grid .g-start-lg-10{grid-column-start:10}.panel-grid .g-start-lg-11{grid-column-start:11}.panel-grid .g-start-lg-12{grid-column-start:12}.panel-grid .g-start-lg-13{grid-column-start:13}.panel-grid .g-start-lg-14{grid-column-start:14}.panel-grid .g-start-lg-15{grid-column-start:15}.panel-grid .g-start-lg-16{grid-column-start:16}.panel-grid .g-start-lg-17{grid-column-start:17}.panel-grid .g-start-lg-18{grid-column-start:18}.panel-grid .g-start-lg-19{grid-column-start:19}.panel-grid .g-start-lg-20{grid-column-start:20}.panel-grid .g-start-lg-21{grid-column-start:21}.panel-grid .g-start-lg-22{grid-column-start:22}.panel-grid .g-start-lg-23{grid-column-start:23}}@media(min-width: 1200px){.panel-grid .g-col-xl-1{grid-column:auto/span 1}.panel-grid .g-col-xl-2{grid-column:auto/span 2}.panel-grid .g-col-xl-3{grid-column:auto/span 3}.panel-grid .g-col-xl-4{grid-column:auto/span 4}.panel-grid .g-col-xl-5{grid-column:auto/span 5}.panel-grid .g-col-xl-6{grid-column:auto/span 6}.panel-grid .g-col-xl-7{grid-column:auto/span 7}.panel-grid .g-col-xl-8{grid-column:auto/span 8}.panel-grid .g-col-xl-9{grid-column:auto/span 9}.panel-grid .g-col-xl-10{grid-column:auto/span 10}.panel-grid .g-col-xl-11{grid-column:auto/span 11}.panel-grid .g-col-xl-12{grid-column:auto/span 12}.panel-grid .g-col-xl-13{grid-column:auto/span 13}.panel-grid .g-col-xl-14{grid-column:auto/span 14}.panel-grid .g-col-xl-15{grid-column:auto/span 15}.panel-grid .g-col-xl-16{grid-column:auto/span 16}.panel-grid .g-col-xl-17{grid-column:auto/span 17}.panel-grid .g-col-xl-18{grid-column:auto/span 18}.panel-grid .g-col-xl-19{grid-column:auto/span 19}.panel-grid .g-col-xl-20{grid-column:auto/span 20}.panel-grid .g-col-xl-21{grid-column:auto/span 21}.panel-grid .g-col-xl-22{grid-column:auto/span 22}.panel-grid .g-col-xl-23{grid-column:auto/span 23}.panel-grid .g-col-xl-24{grid-column:auto/span 24}.panel-grid 
.g-start-xl-1{grid-column-start:1}.panel-grid .g-start-xl-2{grid-column-start:2}.panel-grid .g-start-xl-3{grid-column-start:3}.panel-grid .g-start-xl-4{grid-column-start:4}.panel-grid .g-start-xl-5{grid-column-start:5}.panel-grid .g-start-xl-6{grid-column-start:6}.panel-grid .g-start-xl-7{grid-column-start:7}.panel-grid .g-start-xl-8{grid-column-start:8}.panel-grid .g-start-xl-9{grid-column-start:9}.panel-grid .g-start-xl-10{grid-column-start:10}.panel-grid .g-start-xl-11{grid-column-start:11}.panel-grid .g-start-xl-12{grid-column-start:12}.panel-grid .g-start-xl-13{grid-column-start:13}.panel-grid .g-start-xl-14{grid-column-start:14}.panel-grid .g-start-xl-15{grid-column-start:15}.panel-grid .g-start-xl-16{grid-column-start:16}.panel-grid .g-start-xl-17{grid-column-start:17}.panel-grid .g-start-xl-18{grid-column-start:18}.panel-grid .g-start-xl-19{grid-column-start:19}.panel-grid .g-start-xl-20{grid-column-start:20}.panel-grid .g-start-xl-21{grid-column-start:21}.panel-grid .g-start-xl-22{grid-column-start:22}.panel-grid .g-start-xl-23{grid-column-start:23}}@media(min-width: 1400px){.panel-grid .g-col-xxl-1{grid-column:auto/span 1}.panel-grid .g-col-xxl-2{grid-column:auto/span 2}.panel-grid .g-col-xxl-3{grid-column:auto/span 3}.panel-grid .g-col-xxl-4{grid-column:auto/span 4}.panel-grid .g-col-xxl-5{grid-column:auto/span 5}.panel-grid .g-col-xxl-6{grid-column:auto/span 6}.panel-grid .g-col-xxl-7{grid-column:auto/span 7}.panel-grid .g-col-xxl-8{grid-column:auto/span 8}.panel-grid .g-col-xxl-9{grid-column:auto/span 9}.panel-grid .g-col-xxl-10{grid-column:auto/span 10}.panel-grid .g-col-xxl-11{grid-column:auto/span 11}.panel-grid .g-col-xxl-12{grid-column:auto/span 12}.panel-grid .g-col-xxl-13{grid-column:auto/span 13}.panel-grid .g-col-xxl-14{grid-column:auto/span 14}.panel-grid .g-col-xxl-15{grid-column:auto/span 15}.panel-grid .g-col-xxl-16{grid-column:auto/span 16}.panel-grid .g-col-xxl-17{grid-column:auto/span 17}.panel-grid .g-col-xxl-18{grid-column:auto/span 18}.panel-grid .g-col-xxl-19{grid-column:auto/span 19}.panel-grid .g-col-xxl-20{grid-column:auto/span 20}.panel-grid .g-col-xxl-21{grid-column:auto/span 21}.panel-grid .g-col-xxl-22{grid-column:auto/span 22}.panel-grid .g-col-xxl-23{grid-column:auto/span 23}.panel-grid .g-col-xxl-24{grid-column:auto/span 24}.panel-grid .g-start-xxl-1{grid-column-start:1}.panel-grid .g-start-xxl-2{grid-column-start:2}.panel-grid .g-start-xxl-3{grid-column-start:3}.panel-grid .g-start-xxl-4{grid-column-start:4}.panel-grid .g-start-xxl-5{grid-column-start:5}.panel-grid .g-start-xxl-6{grid-column-start:6}.panel-grid .g-start-xxl-7{grid-column-start:7}.panel-grid .g-start-xxl-8{grid-column-start:8}.panel-grid .g-start-xxl-9{grid-column-start:9}.panel-grid .g-start-xxl-10{grid-column-start:10}.panel-grid .g-start-xxl-11{grid-column-start:11}.panel-grid .g-start-xxl-12{grid-column-start:12}.panel-grid .g-start-xxl-13{grid-column-start:13}.panel-grid .g-start-xxl-14{grid-column-start:14}.panel-grid .g-start-xxl-15{grid-column-start:15}.panel-grid .g-start-xxl-16{grid-column-start:16}.panel-grid .g-start-xxl-17{grid-column-start:17}.panel-grid .g-start-xxl-18{grid-column-start:18}.panel-grid .g-start-xxl-19{grid-column-start:19}.panel-grid .g-start-xxl-20{grid-column-start:20}.panel-grid .g-start-xxl-21{grid-column-start:21}.panel-grid .g-start-xxl-22{grid-column-start:22}.panel-grid 
.g-start-xxl-23{grid-column-start:23}}main{margin-top:1em;margin-bottom:1em}h1,.h1,h2,.h2{color:inherit;margin-top:2rem;margin-bottom:1rem;font-weight:600}h1.title,.title.h1{margin-top:0}main.content>section:first-of-type>h2:first-child,main.content>section:first-of-type>.h2:first-child{margin-top:0}h2,.h2{border-bottom:1px solid #dee2e6;padding-bottom:.5rem}h3,.h3{font-weight:600}h3,.h3,h4,.h4{opacity:.9;margin-top:1.5rem}h5,.h5,h6,.h6{opacity:.9}.header-section-number{color:#6d7a86}.nav-link.active .header-section-number{color:inherit}mark,.mark{padding:0em}.panel-caption,.figure-caption,.subfigure-caption,.table-caption,figcaption,caption{font-size:.9rem;color:#6d7a86}.quarto-layout-cell[data-ref-parent] caption{color:#6d7a86}.column-margin figcaption,.margin-caption,div.aside,aside,.column-margin{color:#6d7a86;font-size:.825rem}.panel-caption.margin-caption{text-align:inherit}.column-margin.column-container p{margin-bottom:0}.column-margin.column-container>*:not(.collapse):first-child{padding-bottom:.5em;display:block}.column-margin.column-container>*:not(.collapse):not(:first-child){padding-top:.5em;padding-bottom:.5em;display:block}.column-margin.column-container>*.collapse:not(.show){display:none}@media(min-width: 768px){.column-margin.column-container .callout-margin-content:first-child{margin-top:4.5em}.column-margin.column-container .callout-margin-content-simple:first-child{margin-top:3.5em}}.margin-caption>*{padding-top:.5em;padding-bottom:.5em}@media(max-width: 767.98px){.quarto-layout-row{flex-direction:column}}.nav-tabs .nav-item{margin-top:1px;cursor:pointer}.tab-content{margin-top:0px;border-left:#dee2e6 1px solid;border-right:#dee2e6 1px solid;border-bottom:#dee2e6 1px solid;margin-left:0;padding:1em;margin-bottom:1em}@media(max-width: 767.98px){.layout-sidebar{margin-left:0;margin-right:0}}.panel-sidebar,.panel-sidebar .form-control,.panel-input,.panel-input .form-control,.selectize-dropdown{font-size:.9rem}.panel-sidebar .form-control,.panel-input .form-control{padding-top:.1rem}.tab-pane div.sourceCode{margin-top:0px}.tab-pane>p{padding-top:0}.tab-pane>p:nth-child(1){padding-top:0}.tab-pane>p:last-child{margin-bottom:0}.tab-pane>pre:last-child{margin-bottom:0}.tab-content>.tab-pane:not(.active){display:none !important}div.sourceCode{background-color:rgba(233,236,239,.65);border:1px solid rgba(233,236,239,.65);border-radius:.25rem}pre.sourceCode{background-color:rgba(0,0,0,0)}pre.sourceCode{border:none;font-size:.875em;overflow:visible !important;padding:.4em}.callout pre.sourceCode{padding-left:0}div.sourceCode{overflow-y:hidden}.callout div.sourceCode{margin-left:initial}.blockquote{font-size:inherit;padding-left:1rem;padding-right:1.5rem;color:#6d7a86}.blockquote h1:first-child,.blockquote .h1:first-child,.blockquote h2:first-child,.blockquote .h2:first-child,.blockquote h3:first-child,.blockquote .h3:first-child,.blockquote h4:first-child,.blockquote .h4:first-child,.blockquote h5:first-child,.blockquote .h5:first-child{margin-top:0}pre{background-color:initial;padding:initial;border:initial}p pre code:not(.sourceCode),li pre code:not(.sourceCode),pre code:not(.sourceCode){background-color:initial}p code:not(.sourceCode),li code:not(.sourceCode),td code:not(.sourceCode){background-color:#f8f9fa;padding:.2em}nav p code:not(.sourceCode),nav li code:not(.sourceCode),nav td code:not(.sourceCode){background-color:rgba(0,0,0,0);padding:0}td 
code:not(.sourceCode){white-space:pre-wrap}#quarto-embedded-source-code-modal>.modal-dialog{max-width:1000px;padding-left:1.75rem;padding-right:1.75rem}#quarto-embedded-source-code-modal>.modal-dialog>.modal-content>.modal-body{padding:0}#quarto-embedded-source-code-modal>.modal-dialog>.modal-content>.modal-body div.sourceCode{margin:0;padding:.2rem .2rem;border-radius:0px;border:none}#quarto-embedded-source-code-modal>.modal-dialog>.modal-content>.modal-header{padding:.7rem}.code-tools-button{font-size:1rem;padding:.15rem .15rem;margin-left:5px;color:#6c757d;background-color:rgba(0,0,0,0);transition:initial;cursor:pointer}.code-tools-button>.bi::before{display:inline-block;height:1rem;width:1rem;content:"";vertical-align:-0.125em;background-image:url('data:image/svg+xml,');background-repeat:no-repeat;background-size:1rem 1rem}.code-tools-button:hover>.bi::before{background-image:url('data:image/svg+xml,')}#quarto-embedded-source-code-modal .code-copy-button>.bi::before{background-image:url('data:image/svg+xml,')}#quarto-embedded-source-code-modal .code-copy-button-checked>.bi::before{background-image:url('data:image/svg+xml,')}.sidebar{will-change:top;transition:top 200ms linear;position:sticky;overflow-y:auto;padding-top:1.2em;max-height:100vh}.sidebar.toc-left,.sidebar.margin-sidebar{top:0px;padding-top:1em}.sidebar.quarto-banner-title-block-sidebar>*{padding-top:1.65em}figure .quarto-notebook-link{margin-top:.5em}.quarto-notebook-link{font-size:.75em;color:#6c757d;margin-bottom:1em;text-decoration:none;display:block}.quarto-notebook-link:hover{text-decoration:underline;color:#2761e3}.quarto-notebook-link::before{display:inline-block;height:.75rem;width:.75rem;margin-bottom:0em;margin-right:.25em;content:"";vertical-align:-0.125em;background-image:url('data:image/svg+xml,');background-repeat:no-repeat;background-size:.75rem .75rem}.toc-actions i.bi,.quarto-code-links i.bi,.quarto-other-links i.bi,.quarto-alternate-notebooks i.bi,.quarto-alternate-formats i.bi{margin-right:.4em;font-size:.8rem}.quarto-other-links-text-target .quarto-code-links i.bi,.quarto-other-links-text-target .quarto-other-links i.bi{margin-right:.2em}.quarto-other-formats-text-target .quarto-alternate-formats i.bi{margin-right:.1em}.toc-actions i.bi.empty,.quarto-code-links i.bi.empty,.quarto-other-links i.bi.empty,.quarto-alternate-notebooks i.bi.empty,.quarto-alternate-formats i.bi.empty{padding-left:1em}.quarto-notebook h2,.quarto-notebook .h2{border-bottom:none}.quarto-notebook .cell-container{display:flex}.quarto-notebook .cell-container .cell{flex-grow:4}.quarto-notebook .cell-container .cell-decorator{padding-top:1.5em;padding-right:1em;text-align:right}.quarto-notebook .cell-container.code-fold .cell-decorator{padding-top:3em}.quarto-notebook .cell-code code{white-space:pre-wrap}.quarto-notebook .cell .cell-output-stderr pre code,.quarto-notebook .cell .cell-output-stdout pre code{white-space:pre-wrap;overflow-wrap:anywhere}.toc-actions,.quarto-alternate-formats,.quarto-other-links,.quarto-code-links,.quarto-alternate-notebooks{padding-left:0em}.sidebar .toc-actions a,.sidebar .quarto-alternate-formats a,.sidebar .quarto-other-links a,.sidebar .quarto-code-links a,.sidebar .quarto-alternate-notebooks a,.sidebar nav[role=doc-toc] a{text-decoration:none}.sidebar .toc-actions a:hover,.sidebar .quarto-other-links a:hover,.sidebar .quarto-code-links a:hover,.sidebar .quarto-alternate-formats a:hover,.sidebar .quarto-alternate-notebooks a:hover{color:#2761e3}.sidebar .toc-actions h2,.sidebar .toc-actions 
.h2,.sidebar .quarto-code-links h2,.sidebar .quarto-code-links .h2,.sidebar .quarto-other-links h2,.sidebar .quarto-other-links .h2,.sidebar .quarto-alternate-notebooks h2,.sidebar .quarto-alternate-notebooks .h2,.sidebar .quarto-alternate-formats h2,.sidebar .quarto-alternate-formats .h2,.sidebar nav[role=doc-toc]>h2,.sidebar nav[role=doc-toc]>.h2{font-weight:500;margin-bottom:.2rem;margin-top:.3rem;font-family:inherit;border-bottom:0;padding-bottom:0;padding-top:0px}.sidebar .toc-actions>h2,.sidebar .toc-actions>.h2,.sidebar .quarto-code-links>h2,.sidebar .quarto-code-links>.h2,.sidebar .quarto-other-links>h2,.sidebar .quarto-other-links>.h2,.sidebar .quarto-alternate-notebooks>h2,.sidebar .quarto-alternate-notebooks>.h2,.sidebar .quarto-alternate-formats>h2,.sidebar .quarto-alternate-formats>.h2{font-size:.8rem}.sidebar nav[role=doc-toc]>h2,.sidebar nav[role=doc-toc]>.h2{font-size:.875rem}.sidebar nav[role=doc-toc]>ul a{border-left:1px solid #e9ecef;padding-left:.6rem}.sidebar .toc-actions h2>ul a,.sidebar .toc-actions .h2>ul a,.sidebar .quarto-code-links h2>ul a,.sidebar .quarto-code-links .h2>ul a,.sidebar .quarto-other-links h2>ul a,.sidebar .quarto-other-links .h2>ul a,.sidebar .quarto-alternate-notebooks h2>ul a,.sidebar .quarto-alternate-notebooks .h2>ul a,.sidebar .quarto-alternate-formats h2>ul a,.sidebar .quarto-alternate-formats .h2>ul a{border-left:none;padding-left:.6rem}.sidebar .toc-actions ul a:empty,.sidebar .quarto-code-links ul a:empty,.sidebar .quarto-other-links ul a:empty,.sidebar .quarto-alternate-notebooks ul a:empty,.sidebar .quarto-alternate-formats ul a:empty,.sidebar nav[role=doc-toc]>ul a:empty{display:none}.sidebar .toc-actions ul,.sidebar .quarto-code-links ul,.sidebar .quarto-other-links ul,.sidebar .quarto-alternate-notebooks ul,.sidebar .quarto-alternate-formats ul{padding-left:0;list-style:none}.sidebar nav[role=doc-toc] ul{list-style:none;padding-left:0;list-style:none}.sidebar nav[role=doc-toc]>ul{margin-left:.45em}.quarto-margin-sidebar nav[role=doc-toc]{padding-left:.5em}.sidebar .toc-actions>ul,.sidebar .quarto-code-links>ul,.sidebar .quarto-other-links>ul,.sidebar .quarto-alternate-notebooks>ul,.sidebar .quarto-alternate-formats>ul{font-size:.8rem}.sidebar nav[role=doc-toc]>ul{font-size:.875rem}.sidebar .toc-actions ul li a,.sidebar .quarto-code-links ul li a,.sidebar .quarto-other-links ul li a,.sidebar .quarto-alternate-notebooks ul li a,.sidebar .quarto-alternate-formats ul li a,.sidebar nav[role=doc-toc]>ul li a{line-height:1.1rem;padding-bottom:.2rem;padding-top:.2rem;color:inherit}.sidebar nav[role=doc-toc] ul>li>ul>li>a{padding-left:1.2em}.sidebar nav[role=doc-toc] ul>li>ul>li>ul>li>a{padding-left:2.4em}.sidebar nav[role=doc-toc] ul>li>ul>li>ul>li>ul>li>a{padding-left:3.6em}.sidebar nav[role=doc-toc] ul>li>ul>li>ul>li>ul>li>ul>li>a{padding-left:4.8em}.sidebar nav[role=doc-toc] ul>li>ul>li>ul>li>ul>li>ul>li>ul>li>a{padding-left:6em}.sidebar nav[role=doc-toc] ul>li>a.active,.sidebar nav[role=doc-toc] ul>li>ul>li>a.active{border-left:1px solid #2761e3;color:#2761e3 !important}.sidebar nav[role=doc-toc] ul>li>a:hover,.sidebar nav[role=doc-toc] ul>li>ul>li>a:hover{color:#2761e3 !important}kbd,.kbd{color:#343a40;background-color:#f8f9fa;border:1px solid;border-radius:5px;border-color:#dee2e6}.quarto-appendix-contents div.hanging-indent{margin-left:0em}.quarto-appendix-contents div.hanging-indent div.csl-entry{margin-left:1em;text-indent:-1em}.citation a,.footnote-ref{text-decoration:none}.footnotes 
ol{padding-left:1em}.tippy-content>*{margin-bottom:.7em}.tippy-content>*:last-child{margin-bottom:0}.callout{margin-top:1.25rem;margin-bottom:1.25rem;border-radius:.25rem;overflow-wrap:break-word}.callout .callout-title-container{overflow-wrap:anywhere}.callout.callout-style-simple{padding:.4em .7em;border-left:5px solid;border-right:1px solid #dee2e6;border-top:1px solid #dee2e6;border-bottom:1px solid #dee2e6}.callout.callout-style-default{border-left:5px solid;border-right:1px solid #dee2e6;border-top:1px solid #dee2e6;border-bottom:1px solid #dee2e6}.callout .callout-body-container{flex-grow:1}.callout.callout-style-simple .callout-body{font-size:.9rem;font-weight:400}.callout.callout-style-default .callout-body{font-size:.9rem;font-weight:400}.callout:not(.no-icon).callout-titled.callout-style-simple .callout-body{padding-left:1.6em}.callout.callout-titled>.callout-header{padding-top:.2em;margin-bottom:-0.2em}.callout.callout-style-simple>div.callout-header{border-bottom:none;font-size:.9rem;font-weight:600;opacity:75%}.callout.callout-style-default>div.callout-header{border-bottom:none;font-weight:600;opacity:85%;font-size:.9rem;padding-left:.5em;padding-right:.5em}.callout.callout-style-default .callout-body{padding-left:.5em;padding-right:.5em}.callout.callout-style-default .callout-body>:first-child{padding-top:.5rem;margin-top:0}.callout>div.callout-header[data-bs-toggle=collapse]{cursor:pointer}.callout.callout-style-default .callout-header[aria-expanded=false],.callout.callout-style-default .callout-header[aria-expanded=true]{padding-top:0px;margin-bottom:0px;align-items:center}.callout.callout-titled .callout-body>:last-child:not(.sourceCode),.callout.callout-titled .callout-body>div>:last-child:not(.sourceCode){padding-bottom:.5rem;margin-bottom:0}.callout:not(.callout-titled) .callout-body>:first-child,.callout:not(.callout-titled) .callout-body>div>:first-child{margin-top:.25rem}.callout:not(.callout-titled) .callout-body>:last-child,.callout:not(.callout-titled) .callout-body>div>:last-child{margin-bottom:.2rem}.callout.callout-style-simple .callout-icon::before,.callout.callout-style-simple .callout-toggle::before{height:1rem;width:1rem;display:inline-block;content:"";background-repeat:no-repeat;background-size:1rem 1rem}.callout.callout-style-default .callout-icon::before,.callout.callout-style-default .callout-toggle::before{height:.9rem;width:.9rem;display:inline-block;content:"";background-repeat:no-repeat;background-size:.9rem .9rem}.callout.callout-style-default .callout-toggle::before{margin-top:5px}.callout .callout-btn-toggle .callout-toggle::before{transition:transform .2s linear}.callout .callout-header[aria-expanded=false] .callout-toggle::before{transform:rotate(-90deg)}.callout .callout-header[aria-expanded=true] .callout-toggle::before{transform:none}.callout.callout-style-simple:not(.no-icon) div.callout-icon-container{padding-top:.2em;padding-right:.55em}.callout.callout-style-default:not(.no-icon) div.callout-icon-container{padding-top:.1em;padding-right:.35em}.callout.callout-style-default:not(.no-icon) div.callout-title-container{margin-top:-1px}.callout.callout-style-default.callout-caution:not(.no-icon) 
div.callout-icon-container{padding-top:.3em;padding-right:.35em}.callout>.callout-body>.callout-icon-container>.no-icon,.callout>.callout-header>.callout-icon-container>.no-icon{display:none}div.callout.callout{border-left-color:#6c757d}div.callout.callout-style-default>.callout-header{background-color:#6c757d}div.callout-note.callout{border-left-color:#2780e3}div.callout-note.callout-style-default>.callout-header{background-color:#e9f2fc}div.callout-note:not(.callout-titled) .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-note.callout-titled .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-note .callout-toggle::before{background-image:url('data:image/svg+xml,')}div.callout-tip.callout{border-left-color:#3fb618}div.callout-tip.callout-style-default>.callout-header{background-color:#ecf8e8}div.callout-tip:not(.callout-titled) .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-tip.callout-titled .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-tip .callout-toggle::before{background-image:url('data:image/svg+xml,')}div.callout-warning.callout{border-left-color:#ff7518}div.callout-warning.callout-style-default>.callout-header{background-color:#fff1e8}div.callout-warning:not(.callout-titled) .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-warning.callout-titled .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-warning .callout-toggle::before{background-image:url('data:image/svg+xml,')}div.callout-caution.callout{border-left-color:#f0ad4e}div.callout-caution.callout-style-default>.callout-header{background-color:#fef7ed}div.callout-caution:not(.callout-titled) .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-caution.callout-titled .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-caution .callout-toggle::before{background-image:url('data:image/svg+xml,')}div.callout-important.callout{border-left-color:#ff0039}div.callout-important.callout-style-default>.callout-header{background-color:#ffe6eb}div.callout-important:not(.callout-titled) .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-important.callout-titled .callout-icon::before{background-image:url('data:image/svg+xml,');}div.callout-important .callout-toggle::before{background-image:url('data:image/svg+xml,')}.quarto-toggle-container{display:flex;align-items:center}.quarto-reader-toggle .bi::before,.quarto-color-scheme-toggle .bi::before{display:inline-block;height:1rem;width:1rem;content:"";background-repeat:no-repeat;background-size:1rem 1rem}.sidebar-navigation{padding-left:20px}.navbar{background-color:#2780e3;color:#fdfeff}.navbar .quarto-color-scheme-toggle:not(.alternate) .bi::before{background-image:url('data:image/svg+xml,')}.navbar .quarto-color-scheme-toggle.alternate .bi::before{background-image:url('data:image/svg+xml,')}.sidebar-navigation .quarto-color-scheme-toggle:not(.alternate) .bi::before{background-image:url('data:image/svg+xml,')}.sidebar-navigation .quarto-color-scheme-toggle.alternate .bi::before{background-image:url('data:image/svg+xml,')}.quarto-sidebar-toggle{border-color:#dee2e6;border-bottom-left-radius:.25rem;border-bottom-right-radius:.25rem;border-style:solid;border-width:1px;overflow:hidden;border-top-width:0px;padding-top:0px 
!important}.quarto-sidebar-toggle-title{cursor:pointer;padding-bottom:2px;margin-left:.25em;text-align:center;font-weight:400;font-size:.775em}#quarto-content .quarto-sidebar-toggle{background:#fafafa}#quarto-content .quarto-sidebar-toggle-title{color:#343a40}.quarto-sidebar-toggle-icon{color:#dee2e6;margin-right:.5em;float:right;transition:transform .2s ease}.quarto-sidebar-toggle-icon::before{padding-top:5px}.quarto-sidebar-toggle.expanded .quarto-sidebar-toggle-icon{transform:rotate(-180deg)}.quarto-sidebar-toggle.expanded .quarto-sidebar-toggle-title{border-bottom:solid #dee2e6 1px}.quarto-sidebar-toggle-contents{background-color:#fff;padding-right:10px;padding-left:10px;margin-top:0px !important;transition:max-height .5s ease}.quarto-sidebar-toggle.expanded .quarto-sidebar-toggle-contents{padding-top:1em;padding-bottom:10px}@media(max-width: 767.98px){.sidebar-menu-container{padding-bottom:5em}}.quarto-sidebar-toggle:not(.expanded) .quarto-sidebar-toggle-contents{padding-top:0px !important;padding-bottom:0px}nav[role=doc-toc]{z-index:1020}#quarto-sidebar>*,nav[role=doc-toc]>*{transition:opacity .1s ease,border .1s ease}#quarto-sidebar.slow>*,nav[role=doc-toc].slow>*{transition:opacity .4s ease,border .4s ease}.quarto-color-scheme-toggle:not(.alternate).top-right .bi::before{background-image:url('data:image/svg+xml,')}.quarto-color-scheme-toggle.alternate.top-right .bi::before{background-image:url('data:image/svg+xml,')}#quarto-appendix.default{border-top:1px solid #dee2e6}#quarto-appendix.default{background-color:#fff;padding-top:1.5em;margin-top:2em;z-index:998}#quarto-appendix.default .quarto-appendix-heading{margin-top:0;line-height:1.4em;font-weight:600;opacity:.9;border-bottom:none;margin-bottom:0}#quarto-appendix.default .footnotes ol,#quarto-appendix.default .footnotes ol li>p:last-of-type,#quarto-appendix.default .quarto-appendix-contents>p:last-of-type{margin-bottom:0}#quarto-appendix.default .footnotes ol{margin-left:.5em}#quarto-appendix.default .quarto-appendix-secondary-label{margin-bottom:.4em}#quarto-appendix.default .quarto-appendix-bibtex{font-size:.7em;padding:1em;border:solid 1px #dee2e6;margin-bottom:1em}#quarto-appendix.default .quarto-appendix-bibtex code.sourceCode{white-space:pre-wrap}#quarto-appendix.default .quarto-appendix-citeas{font-size:.9em;padding:1em;border:solid 1px #dee2e6;margin-bottom:1em}#quarto-appendix.default .quarto-appendix-heading{font-size:1em !important}#quarto-appendix.default *[role=doc-endnotes]>ol,#quarto-appendix.default .quarto-appendix-contents>*:not(h2):not(.h2){font-size:.9em}#quarto-appendix.default section{padding-bottom:1.5em}#quarto-appendix.default section *[role=doc-endnotes],#quarto-appendix.default section>*:not(a){opacity:.9;word-wrap:break-word}.btn.btn-quarto,div.cell-output-display .btn-quarto{--bs-btn-color: #cacccd;--bs-btn-bg: #343a40;--bs-btn-border-color: #343a40;--bs-btn-hover-color: #cacccd;--bs-btn-hover-bg: #52585d;--bs-btn-hover-border-color: #484e53;--bs-btn-focus-shadow-rgb: 75, 80, 85;--bs-btn-active-color: #fff;--bs-btn-active-bg: #5d6166;--bs-btn-active-border-color: #484e53;--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color: #fff;--bs-btn-disabled-bg: #343a40;--bs-btn-disabled-border-color: #343a40}nav.quarto-secondary-nav.color-navbar{background-color:#2780e3;color:#fdfeff}nav.quarto-secondary-nav.color-navbar h1,nav.quarto-secondary-nav.color-navbar .h1,nav.quarto-secondary-nav.color-navbar .quarto-btn-toggle{color:#fdfeff}@media(max-width: 991.98px){body.nav-sidebar 
.quarto-title-banner{margin-bottom:0;padding-bottom:1em}body.nav-sidebar #title-block-header{margin-block-end:0}}p.subtitle{margin-top:.25em;margin-bottom:.5em}code a:any-link{color:inherit;text-decoration-color:#6c757d}/*! light */div.observablehq table thead tr th{background-color:var(--bs-body-bg)}input,button,select,optgroup,textarea{background-color:var(--bs-body-bg)}.code-annotated .code-copy-button{margin-right:1.25em;margin-top:0;padding-bottom:0;padding-top:3px}.code-annotation-gutter-bg{background-color:#fff}.code-annotation-gutter{background-color:rgba(233,236,239,.65)}.code-annotation-gutter,.code-annotation-gutter-bg{height:100%;width:calc(20px + .5em);position:absolute;top:0;right:0}dl.code-annotation-container-grid dt{margin-right:1em;margin-top:.25rem}dl.code-annotation-container-grid dt{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;color:#4b545c;border:solid #4b545c 1px;border-radius:50%;height:22px;width:22px;line-height:22px;font-size:11px;text-align:center;vertical-align:middle;text-decoration:none}dl.code-annotation-container-grid dt[data-target-cell]{cursor:pointer}dl.code-annotation-container-grid dt[data-target-cell].code-annotation-active{color:#fff;border:solid #aaa 1px;background-color:#aaa}pre.code-annotation-code{padding-top:0;padding-bottom:0}pre.code-annotation-code code{z-index:3}#code-annotation-line-highlight-gutter{width:100%;border-top:solid rgba(170,170,170,.2666666667) 1px;border-bottom:solid rgba(170,170,170,.2666666667) 1px;z-index:2;background-color:rgba(170,170,170,.1333333333)}#code-annotation-line-highlight{margin-left:-4em;width:calc(100% + 4em);border-top:solid rgba(170,170,170,.2666666667) 1px;border-bottom:solid rgba(170,170,170,.2666666667) 1px;z-index:2;background-color:rgba(170,170,170,.1333333333)}code.sourceCode .code-annotation-anchor.code-annotation-active{background-color:var(--quarto-hl-normal-color, #aaaaaa);border:solid var(--quarto-hl-normal-color, #aaaaaa) 1px;color:#e9ecef;font-weight:bolder}code.sourceCode .code-annotation-anchor{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;color:var(--quarto-hl-co-color);border:solid var(--quarto-hl-co-color) 1px;border-radius:50%;height:18px;width:18px;font-size:9px;margin-top:2px}code.sourceCode button.code-annotation-anchor{padding:2px;user-select:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none}code.sourceCode a.code-annotation-anchor{line-height:18px;text-align:center;vertical-align:middle;cursor:default;text-decoration:none}@media print{.page-columns .column-screen-inset{grid-column:page-start-inset/page-end-inset;z-index:998;opacity:.999}.page-columns .column-screen-inset table{background:#fff}.page-columns .column-screen-inset-left{grid-column:page-start-inset/body-content-end;z-index:998;opacity:.999}.page-columns .column-screen-inset-left table{background:#fff}.page-columns .column-screen-inset-right{grid-column:body-content-start/page-end-inset;z-index:998;opacity:.999}.page-columns .column-screen-inset-right table{background:#fff}.page-columns .column-screen{grid-column:page-start/page-end;z-index:998;opacity:.999}.page-columns .column-screen table{background:#fff}.page-columns .column-screen-left{grid-column:page-start/body-content-end;z-index:998;opacity:.999}.page-columns .column-screen-left table{background:#fff}.page-columns .column-screen-right{grid-column:body-content-start/page-end;z-index:998;opacity:.999}.page-columns .column-screen-right 
table{background:#fff}.page-columns .column-screen-inset-shaded{grid-column:page-start-inset/page-end-inset;padding:1em;background:#f8f9fa;z-index:998;opacity:.999;margin-bottom:1em}}.quarto-video{margin-bottom:1em}.table{border-top:1px solid #ebedee;border-bottom:1px solid #ebedee}.table>thead{border-top-width:0;border-bottom:1px solid #b2bac1}.table a{word-break:break-word}.table>:not(caption)>*>*{background-color:unset;color:unset}#quarto-document-content .crosstalk-input .checkbox input[type=checkbox],#quarto-document-content .crosstalk-input .checkbox-inline input[type=checkbox]{position:unset;margin-top:unset;margin-left:unset}#quarto-document-content .row{margin-left:unset;margin-right:unset}.quarto-xref{white-space:nowrap}a.external:after{content:"";background-image:url('data:image/svg+xml,');background-size:contain;background-repeat:no-repeat;background-position:center center;margin-left:.2em;padding-right:.75em}div.sourceCode code a.external:after{content:none}a.external:after:hover{cursor:pointer}.quarto-ext-icon{display:inline-block;font-size:.75em;padding-left:.3em}.code-with-filename .code-with-filename-file{margin-bottom:0;padding-bottom:2px;padding-top:2px;padding-left:.7em;border:var(--quarto-border-width) solid var(--quarto-border-color);border-radius:var(--quarto-border-radius);border-bottom:0;border-bottom-left-radius:0%;border-bottom-right-radius:0%}.code-with-filename div.sourceCode,.reveal .code-with-filename div.sourceCode{margin-top:0;border-top-left-radius:0%;border-top-right-radius:0%}.code-with-filename .code-with-filename-file pre{margin-bottom:0}.code-with-filename .code-with-filename-file{background-color:rgba(219,219,219,.8)}.quarto-dark .code-with-filename .code-with-filename-file{background-color:#555}.code-with-filename .code-with-filename-file strong{font-weight:400}.quarto-title-banner{margin-bottom:1em;color:#fdfeff;background:#2780e3}.quarto-title-banner a{color:#fdfeff}.quarto-title-banner h1,.quarto-title-banner .h1,.quarto-title-banner h2,.quarto-title-banner .h2{color:#fdfeff}.quarto-title-banner .code-tools-button{color:#97cbff}.quarto-title-banner .code-tools-button:hover{color:#fdfeff}.quarto-title-banner .code-tools-button>.bi::before{background-image:url('data:image/svg+xml,')}.quarto-title-banner .code-tools-button:hover>.bi::before{background-image:url('data:image/svg+xml,')}.quarto-title-banner .quarto-title .title{font-weight:600}.quarto-title-banner .quarto-categories{margin-top:.75em}@media(min-width: 992px){.quarto-title-banner{padding-top:2.5em;padding-bottom:2.5em}}@media(max-width: 991.98px){.quarto-title-banner{padding-top:1em;padding-bottom:1em}}@media(max-width: 767.98px){body.hypothesis-enabled #title-block-header>*{padding-right:20px}}main.quarto-banner-title-block>section:first-child>h2,main.quarto-banner-title-block>section:first-child>.h2,main.quarto-banner-title-block>section:first-child>h3,main.quarto-banner-title-block>section:first-child>.h3,main.quarto-banner-title-block>section:first-child>h4,main.quarto-banner-title-block>section:first-child>.h4{margin-top:0}.quarto-title .quarto-categories{display:flex;flex-wrap:wrap;row-gap:.5em;column-gap:.4em;padding-bottom:.5em;margin-top:.75em}.quarto-title .quarto-categories .quarto-category{padding:.25em .75em;font-size:.65em;text-transform:uppercase;border:solid 1px;border-radius:.25rem;opacity:.6}.quarto-title .quarto-categories .quarto-category a{color:inherit}.quarto-title-meta-container{display:grid;grid-template-columns:1fr 
auto}.quarto-title-meta-column-end{display:flex;flex-direction:column;padding-left:1em}.quarto-title-meta-column-end a .bi{margin-right:.3em}#title-block-header.quarto-title-block.default .quarto-title-meta{display:grid;grid-template-columns:minmax(max-content, 1fr) 1fr;grid-column-gap:1em}#title-block-header.quarto-title-block.default .quarto-title .title{margin-bottom:0}#title-block-header.quarto-title-block.default .quarto-title-author-orcid img{margin-top:-0.2em;height:.8em;width:.8em}#title-block-header.quarto-title-block.default .quarto-title-author-email{opacity:.7}#title-block-header.quarto-title-block.default .quarto-description p:last-of-type{margin-bottom:0}#title-block-header.quarto-title-block.default .quarto-title-meta-contents p,#title-block-header.quarto-title-block.default .quarto-title-authors p,#title-block-header.quarto-title-block.default .quarto-title-affiliations p{margin-bottom:.1em}#title-block-header.quarto-title-block.default .quarto-title-meta-heading{text-transform:uppercase;margin-top:1em;font-size:.8em;opacity:.8;font-weight:400}#title-block-header.quarto-title-block.default .quarto-title-meta-contents{font-size:.9em}#title-block-header.quarto-title-block.default .quarto-title-meta-contents p.affiliation:last-of-type{margin-bottom:.1em}#title-block-header.quarto-title-block.default p.affiliation{margin-bottom:.1em}#title-block-header.quarto-title-block.default .keywords,#title-block-header.quarto-title-block.default .description,#title-block-header.quarto-title-block.default .abstract{margin-top:0}#title-block-header.quarto-title-block.default .keywords>p,#title-block-header.quarto-title-block.default .description>p,#title-block-header.quarto-title-block.default .abstract>p{font-size:.9em}#title-block-header.quarto-title-block.default .keywords>p:last-of-type,#title-block-header.quarto-title-block.default .description>p:last-of-type,#title-block-header.quarto-title-block.default .abstract>p:last-of-type{margin-bottom:0}#title-block-header.quarto-title-block.default .keywords .block-title,#title-block-header.quarto-title-block.default .description .block-title,#title-block-header.quarto-title-block.default .abstract .block-title{margin-top:1em;text-transform:uppercase;font-size:.8em;opacity:.8;font-weight:400}#title-block-header.quarto-title-block.default .quarto-title-meta-author{display:grid;grid-template-columns:minmax(max-content, 1fr) 1fr;grid-column-gap:1em}.quarto-title-tools-only{display:flex;justify-content:right}body{-webkit-font-smoothing:antialiased}.badge.bg-light{color:#343a40}.progress .progress-bar{font-size:8px;line-height:8px} diff --git a/site_libs/bootstrap/bootstrap.min.js b/site_libs/bootstrap/bootstrap.min.js new file mode 100644 index 000000000..e8f21f703 --- /dev/null +++ b/site_libs/bootstrap/bootstrap.min.js @@ -0,0 +1,7 @@ +/*! + * Bootstrap v5.3.1 (https://getbootstrap.com/) + * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */ +!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).bootstrap=e()}(this,(function(){"use strict";const t=new Map,e={set(e,i,n){t.has(e)||t.set(e,new Map);const s=t.get(e);s.has(i)||0===s.size?s.set(i,n):console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(s.keys())[0]}.`)},get:(e,i)=>t.has(e)&&t.get(e).get(i)||null,remove(e,i){if(!t.has(e))return;const n=t.get(e);n.delete(i),0===n.size&&t.delete(e)}},i="transitionend",n=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\s"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),s=t=>{t.dispatchEvent(new Event(i))},o=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),r=t=>o(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(n(t)):null,a=t=>{if(!o(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},l=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),c=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?c(t.parentNode):null},h=()=>{},d=t=>{t.offsetHeight},u=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,f=[],p=()=>"rtl"===document.documentElement.dir,m=t=>{var e;e=()=>{const e=u();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(f.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of f)t()})),f.push(e)):e()},g=(t,e=[],i=t)=>"function"==typeof t?t(...e):i,_=(t,e,n=!0)=>{if(!n)return void g(t);const o=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let r=!1;const a=({target:n})=>{n===e&&(r=!0,e.removeEventListener(i,a),g(t))};e.addEventListener(i,a),setTimeout((()=>{r||s(e)}),o)},b=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},v=/[^.]*(?=\..*)\.|.*/,y=/\..*/,w=/::\d+$/,A={};let E=1;const T={mouseenter:"mouseover",mouseleave:"mouseout"},C=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function O(t,e){return e&&`${e}::${E++}`||t.uidEvent||E++}function x(t){const e=O(t);return t.uidEvent=e,A[e]=A[e]||{},A[e]}function k(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function L(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=I(t);return C.has(o)||(o=t),[n,s,o]}function S(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=L(e,i,n);if(e in T){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const 
l=x(t),c=l[a]||(l[a]={}),h=k(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=O(r,e.replace(v,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return P(s,{delegateTarget:r}),n.oneOff&&N.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return P(n,{delegateTarget:t}),i.oneOff&&N.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function D(t,e,i,n,s){const o=k(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function $(t,e,i,n){const s=e[i]||{};for(const[o,r]of Object.entries(s))o.includes(n)&&D(t,e,i,r.callable,r.delegationSelector)}function I(t){return t=t.replace(y,""),T[t]||t}const N={on(t,e,i,n){S(t,e,i,n,!1)},one(t,e,i,n){S(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=L(e,i,n),a=r!==e,l=x(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))$(t,l,i,e.slice(1));for(const[i,n]of Object.entries(c)){const s=i.replace(w,"");a&&!e.includes(s)||D(t,l,r,n.callable,n.delegationSelector)}}else{if(!Object.keys(c).length)return;D(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=u();let s=null,o=!0,r=!0,a=!1;e!==I(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());const l=P(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function P(t,e={}){for(const[i,n]of Object.entries(e))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}function M(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function j(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const F={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${j(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${j(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=M(t.dataset[n])}return e},getDataAttribute:(t,e)=>M(t.getAttribute(`data-bs-${j(e)}`))};class H{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=o(e)?F.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...o(e)?F.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[n,s]of Object.entries(e)){const e=t[n],r=o(e)?"element":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(r))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${r}" but expected type "${s}".`)}var i}}class W extends 
H{constructor(t,i){super(),(t=r(t))&&(this._element=t,this._config=this._getConfig(i),e.set(this._element,this.constructor.DATA_KEY,this))}dispose(){e.remove(this._element,this.constructor.DATA_KEY),N.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){_(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return e.get(r(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.3.1"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const B=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?i.trim():null}return n(e)},z={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!l(t)&&a(t)))},getSelectorFromElement(t){const e=B(t);return e&&z.findOne(e)?e:null},getElementFromSelector(t){const e=B(t);return e?z.findOne(e):null},getMultipleElementsFromSelector(t){const e=B(t);return e?z.find(e):[]}},R=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;N.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),l(this))return;const s=z.getElementFromSelector(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},q=".bs.alert",V=`close${q}`,K=`closed${q}`;class Q extends W{static get NAME(){return"alert"}close(){if(N.trigger(this._element,V).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),N.trigger(this._element,K),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Q.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}R(Q,"close"),m(Q);const X='[data-bs-toggle="button"]';class Y extends W{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=Y.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}N.on(document,"click.bs.button.data-api",X,(t=>{t.preventDefault();const e=t.target.closest(X);Y.getOrCreateInstance(e).toggle()})),m(Y);const 
U=".bs.swipe",G=`touchstart${U}`,J=`touchmove${U}`,Z=`touchend${U}`,tt=`pointerdown${U}`,et=`pointerup${U}`,it={endCallback:null,leftCallback:null,rightCallback:null},nt={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class st extends H{constructor(t,e){super(),this._element=t,t&&st.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return it}static get DefaultType(){return nt}static get NAME(){return"swipe"}dispose(){N.off(this._element,U)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),g(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&g(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(N.on(this._element,tt,(t=>this._start(t))),N.on(this._element,et,(t=>this._end(t))),this._element.classList.add("pointer-event")):(N.on(this._element,G,(t=>this._start(t))),N.on(this._element,J,(t=>this._move(t))),N.on(this._element,Z,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const ot=".bs.carousel",rt=".data-api",at="next",lt="prev",ct="left",ht="right",dt=`slide${ot}`,ut=`slid${ot}`,ft=`keydown${ot}`,pt=`mouseenter${ot}`,mt=`mouseleave${ot}`,gt=`dragstart${ot}`,_t=`load${ot}${rt}`,bt=`click${ot}${rt}`,vt="carousel",yt="active",wt=".active",At=".carousel-item",Et=wt+At,Tt={ArrowLeft:ht,ArrowRight:ct},Ct={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},Ot={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class xt extends W{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=z.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===vt&&this.cycle()}static get Default(){return Ct}static get DefaultType(){return Ot}static get NAME(){return"carousel"}next(){this._slide(at)}nextWhenVisible(){!document.hidden&&a(this._element)&&this.next()}prev(){this._slide(lt)}pause(){this._isSliding&&s(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?N.one(this._element,ut,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void N.one(this._element,ut,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?at:lt;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return 
t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&N.on(this._element,ft,(t=>this._keydown(t))),"hover"===this._config.pause&&(N.on(this._element,pt,(()=>this.pause())),N.on(this._element,mt,(()=>this._maybeEnableCycle()))),this._config.touch&&st.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of z.find(".carousel-item img",this._element))N.on(t,gt,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(ct)),rightCallback:()=>this._slide(this._directionToOrder(ht)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new st(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=Tt[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=z.findOne(wt,this._indicatorsElement);e.classList.remove(yt),e.removeAttribute("aria-current");const i=z.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(yt),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===at,s=e||b(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>N.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(dt).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),d(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(yt),i.classList.remove(yt,c,l),this._isSliding=!1,r(ut)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return z.findOne(Et,this._element)}_getItems(){return z.find(At,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return p()?t===ct?lt:at:t===ct?at:lt}_orderToDirection(t){return p()?t===lt?ct:ht:t===lt?ht:ct}static jQueryInterface(t){return this.each((function(){const e=xt.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}N.on(document,bt,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=z.getElementFromSelector(this);if(!e||!e.classList.contains(vt))return;t.preventDefault();const i=xt.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===F.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),N.on(window,_t,(()=>{const t=z.find('[data-bs-ride="carousel"]');for(const e of t)xt.getOrCreateInstance(e)})),m(xt);const 
kt=".bs.collapse",Lt=`show${kt}`,St=`shown${kt}`,Dt=`hide${kt}`,$t=`hidden${kt}`,It=`click${kt}.data-api`,Nt="show",Pt="collapse",Mt="collapsing",jt=`:scope .${Pt} .${Pt}`,Ft='[data-bs-toggle="collapse"]',Ht={parent:null,toggle:!0},Wt={parent:"(null|element)",toggle:"boolean"};class Bt extends W{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=z.find(Ft);for(const t of i){const e=z.getSelectorFromElement(t),i=z.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return Ht}static get DefaultType(){return Wt}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>Bt.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(N.trigger(this._element,Lt).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(Pt),this._element.classList.add(Mt),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(Mt),this._element.classList.add(Pt,Nt),this._element.style[e]="",N.trigger(this._element,St)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(N.trigger(this._element,Dt).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,d(this._element),this._element.classList.add(Mt),this._element.classList.remove(Pt,Nt);for(const t of this._triggerArray){const e=z.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(Mt),this._element.classList.add(Pt),N.trigger(this._element,$t)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(Nt)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=r(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(Ft);for(const e of t){const t=z.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=z.find(jt,this._config.parent);return z.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=Bt.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}N.on(document,It,Ft,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of z.getMultipleElementsFromSelector(this))Bt.getOrCreateInstance(t,{toggle:!1}).toggle()})),m(Bt);var 
zt="top",Rt="bottom",qt="right",Vt="left",Kt="auto",Qt=[zt,Rt,qt,Vt],Xt="start",Yt="end",Ut="clippingParents",Gt="viewport",Jt="popper",Zt="reference",te=Qt.reduce((function(t,e){return t.concat([e+"-"+Xt,e+"-"+Yt])}),[]),ee=[].concat(Qt,[Kt]).reduce((function(t,e){return t.concat([e,e+"-"+Xt,e+"-"+Yt])}),[]),ie="beforeRead",ne="read",se="afterRead",oe="beforeMain",re="main",ae="afterMain",le="beforeWrite",ce="write",he="afterWrite",de=[ie,ne,se,oe,re,ae,le,ce,he];function ue(t){return t?(t.nodeName||"").toLowerCase():null}function fe(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function pe(t){return t instanceof fe(t).Element||t instanceof Element}function me(t){return t instanceof fe(t).HTMLElement||t instanceof HTMLElement}function ge(t){return"undefined"!=typeof ShadowRoot&&(t instanceof fe(t).ShadowRoot||t instanceof ShadowRoot)}const _e={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];me(s)&&ue(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});me(n)&&ue(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function be(t){return t.split("-")[0]}var ve=Math.max,ye=Math.min,we=Math.round;function Ae(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function Ee(){return!/^((?!chrome|android).)*safari/i.test(Ae())}function Te(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&me(t)&&(s=t.offsetWidth>0&&we(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&we(n.height)/t.offsetHeight||1);var r=(pe(t)?fe(t):window).visualViewport,a=!Ee()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function Ce(t){var e=Te(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function Oe(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&ge(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function xe(t){return fe(t).getComputedStyle(t)}function ke(t){return["table","td","th"].indexOf(ue(t))>=0}function Le(t){return((pe(t)?t.ownerDocument:t.document)||window.document).documentElement}function Se(t){return"html"===ue(t)?t:t.assignedSlot||t.parentNode||(ge(t)?t.host:null)||Le(t)}function De(t){return me(t)&&"fixed"!==xe(t).position?t.offsetParent:null}function $e(t){for(var e=fe(t),i=De(t);i&&ke(i)&&"static"===xe(i).position;)i=De(i);return i&&("html"===ue(i)||"body"===ue(i)&&"static"===xe(i).position)?e:i||function(t){var 
e=/firefox/i.test(Ae());if(/Trident/i.test(Ae())&&me(t)&&"fixed"===xe(t).position)return null;var i=Se(t);for(ge(i)&&(i=i.host);me(i)&&["html","body"].indexOf(ue(i))<0;){var n=xe(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function Ie(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function Ne(t,e,i){return ve(t,ye(e,i))}function Pe(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function Me(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const je={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,n=t.name,s=t.options,o=i.elements.arrow,r=i.modifiersData.popperOffsets,a=be(i.placement),l=Ie(a),c=[Vt,qt].indexOf(a)>=0?"height":"width";if(o&&r){var h=function(t,e){return Pe("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:Me(t,Qt))}(s.padding,i),d=Ce(o),u="y"===l?zt:Vt,f="y"===l?Rt:qt,p=i.rects.reference[c]+i.rects.reference[l]-r[l]-i.rects.popper[c],m=r[l]-i.rects.reference[l],g=$e(o),_=g?"y"===l?g.clientHeight||0:g.clientWidth||0:0,b=p/2-m/2,v=h[u],y=_-d[c]-h[f],w=_/2-d[c]/2+b,A=Ne(v,w,y),E=l;i.modifiersData[n]=((e={})[E]=A,e.centerOffset=A-w,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&Oe(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Fe(t){return t.split("-")[1]}var He={top:"auto",right:"auto",bottom:"auto",left:"auto"};function We(t){var e,i=t.popper,n=t.popperRect,s=t.placement,o=t.variation,r=t.offsets,a=t.position,l=t.gpuAcceleration,c=t.adaptive,h=t.roundOffsets,d=t.isFixed,u=r.x,f=void 0===u?0:u,p=r.y,m=void 0===p?0:p,g="function"==typeof h?h({x:f,y:m}):{x:f,y:m};f=g.x,m=g.y;var _=r.hasOwnProperty("x"),b=r.hasOwnProperty("y"),v=Vt,y=zt,w=window;if(c){var A=$e(i),E="clientHeight",T="clientWidth";A===fe(i)&&"static"!==xe(A=Le(i)).position&&"absolute"===a&&(E="scrollHeight",T="scrollWidth"),(s===zt||(s===Vt||s===qt)&&o===Yt)&&(y=Rt,m-=(d&&A===w&&w.visualViewport?w.visualViewport.height:A[E])-n.height,m*=l?1:-1),s!==Vt&&(s!==zt&&s!==Rt||o!==Yt)||(v=qt,f-=(d&&A===w&&w.visualViewport?w.visualViewport.width:A[T])-n.width,f*=l?1:-1)}var C,O=Object.assign({position:a},c&&He),x=!0===h?function(t,e){var i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:we(i*s)/s||0,y:we(n*s)/s||0}}({x:f,y:m},fe(i)):{x:f,y:m};return f=x.x,m=x.y,l?Object.assign({},O,((C={})[y]=b?"0":"",C[v]=_?"0":"",C.transform=(w.devicePixelRatio||1)<=1?"translate("+f+"px, "+m+"px)":"translate3d("+f+"px, "+m+"px, 0)",C)):Object.assign({},O,((e={})[y]=b?m+"px":"",e[v]=_?f+"px":"",e.transform="",e))}const Be={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 
0===a||a,c={placement:be(e.placement),variation:Fe(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,We(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,We(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var ze={passive:!0};const Re={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=fe(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,ze)})),a&&l.addEventListener("resize",i.update,ze),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,ze)})),a&&l.removeEventListener("resize",i.update,ze)}},data:{}};var qe={left:"right",right:"left",bottom:"top",top:"bottom"};function Ve(t){return t.replace(/left|right|bottom|top/g,(function(t){return qe[t]}))}var Ke={start:"end",end:"start"};function Qe(t){return t.replace(/start|end/g,(function(t){return Ke[t]}))}function Xe(t){var e=fe(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function Ye(t){return Te(Le(t)).left+Xe(t).scrollLeft}function Ue(t){var e=xe(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function Ge(t){return["html","body","#document"].indexOf(ue(t))>=0?t.ownerDocument.body:me(t)&&Ue(t)?t:Ge(Se(t))}function Je(t,e){var i;void 0===e&&(e=[]);var n=Ge(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=fe(n),r=s?[o].concat(o.visualViewport||[],Ue(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(Je(Se(r)))}function Ze(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function ti(t,e,i){return e===Gt?Ze(function(t,e){var i=fe(t),n=Le(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=Ee();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+Ye(t),y:l}}(t,i)):pe(e)?function(t,e){var i=Te(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):Ze(function(t){var e,i=Le(t),n=Xe(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=ve(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=ve(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+Ye(t),l=-n.scrollTop;return"rtl"===xe(s||i).direction&&(a+=ve(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(Le(t)))}function ei(t){var e,i=t.reference,n=t.element,s=t.placement,o=s?be(s):null,r=s?Fe(s):null,a=i.x+i.width/2-n.width/2,l=i.y+i.height/2-n.height/2;switch(o){case zt:e={x:a,y:i.y-n.height};break;case Rt:e={x:a,y:i.y+i.height};break;case qt:e={x:i.x+i.width,y:l};break;case Vt:e={x:i.x-n.width,y:l};break;default:e={x:i.x,y:i.y}}var c=o?Ie(o):null;if(null!=c){var h="y"===c?"height":"width";switch(r){case Xt:e[c]=e[c]-(i[h]/2-n[h]/2);break;case Yt:e[c]=e[c]+(i[h]/2-n[h]/2)}}return e}function ii(t,e){void 0===e&&(e={});var 
i=e,n=i.placement,s=void 0===n?t.placement:n,o=i.strategy,r=void 0===o?t.strategy:o,a=i.boundary,l=void 0===a?Ut:a,c=i.rootBoundary,h=void 0===c?Gt:c,d=i.elementContext,u=void 0===d?Jt:d,f=i.altBoundary,p=void 0!==f&&f,m=i.padding,g=void 0===m?0:m,_=Pe("number"!=typeof g?g:Me(g,Qt)),b=u===Jt?Zt:Jt,v=t.rects.popper,y=t.elements[p?b:u],w=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=Je(Se(t)),i=["absolute","fixed"].indexOf(xe(t).position)>=0&&me(t)?$e(t):t;return pe(i)?e.filter((function(t){return pe(t)&&Oe(t,i)&&"body"!==ue(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=ti(t,i,n);return e.top=ve(s.top,e.top),e.right=ye(s.right,e.right),e.bottom=ye(s.bottom,e.bottom),e.left=ve(s.left,e.left),e}),ti(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(pe(y)?y:y.contextElement||Le(t.elements.popper),l,h,r),A=Te(t.elements.reference),E=ei({reference:A,element:v,strategy:"absolute",placement:s}),T=Ze(Object.assign({},v,E)),C=u===Jt?T:A,O={top:w.top-C.top+_.top,bottom:C.bottom-w.bottom+_.bottom,left:w.left-C.left+_.left,right:C.right-w.right+_.right},x=t.modifiersData.offset;if(u===Jt&&x){var k=x[s];Object.keys(O).forEach((function(t){var e=[qt,Rt].indexOf(t)>=0?1:-1,i=[zt,Rt].indexOf(t)>=0?"y":"x";O[t]+=k[i]*e}))}return O}function ni(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,l=i.allowedAutoPlacements,c=void 0===l?ee:l,h=Fe(n),d=h?a?te:te.filter((function(t){return Fe(t)===h})):Qt,u=d.filter((function(t){return c.indexOf(t)>=0}));0===u.length&&(u=d);var f=u.reduce((function(e,i){return e[i]=ii(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[be(i)],e}),{});return Object.keys(f).sort((function(t,e){return f[t]-f[e]}))}const si={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name;if(!e.modifiersData[n]._skip){for(var s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0===r||r,l=i.fallbackPlacements,c=i.padding,h=i.boundary,d=i.rootBoundary,u=i.altBoundary,f=i.flipVariations,p=void 0===f||f,m=i.allowedAutoPlacements,g=e.options.placement,_=be(g),b=l||(_!==g&&p?function(t){if(be(t)===Kt)return[];var e=Ve(t);return[Qe(t),e,Qe(e)]}(g):[Ve(g)]),v=[g].concat(b).reduce((function(t,i){return t.concat(be(i)===Kt?ni(e,{placement:i,boundary:h,rootBoundary:d,padding:c,flipVariations:p,allowedAutoPlacements:m}):i)}),[]),y=e.rects.reference,w=e.rects.popper,A=new Map,E=!0,T=v[0],C=0;C=0,S=L?"width":"height",D=ii(e,{placement:O,boundary:h,rootBoundary:d,altBoundary:u,padding:c}),$=L?k?qt:Vt:k?Rt:zt;y[S]>w[S]&&($=Ve($));var I=Ve($),N=[];if(o&&N.push(D[x]<=0),a&&N.push(D[$]<=0,D[I]<=0),N.every((function(t){return t}))){T=O,E=!1;break}A.set(O,N)}if(E)for(var P=function(t){var e=v.find((function(e){var i=A.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return T=e,"break"},M=p?3:1;M>0&&"break"!==P(M);M--);e.placement!==T&&(e.modifiersData[n]._skip=!0,e.placement=T,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function oi(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function ri(t){return[zt,qt,Rt,Vt].some((function(e){return t[e]>=0}))}const ai={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var 
e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=ii(e,{elementContext:"reference"}),a=ii(e,{altBoundary:!0}),l=oi(r,n),c=oi(a,s,o),h=ri(l),d=ri(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},li={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.offset,o=void 0===s?[0,0]:s,r=ee.reduce((function(t,i){return t[i]=function(t,e,i){var n=be(t),s=[Vt,zt].indexOf(n)>=0?-1:1,o="function"==typeof i?i(Object.assign({},e,{placement:t})):i,r=o[0],a=o[1];return r=r||0,a=(a||0)*s,[Vt,qt].indexOf(n)>=0?{x:a,y:r}:{x:r,y:a}}(i,e.rects,o),t}),{}),a=r[e.placement],l=a.x,c=a.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=l,e.modifiersData.popperOffsets.y+=c),e.modifiersData[n]=r}},ci={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=ei({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},hi={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0!==r&&r,l=i.boundary,c=i.rootBoundary,h=i.altBoundary,d=i.padding,u=i.tether,f=void 0===u||u,p=i.tetherOffset,m=void 0===p?0:p,g=ii(e,{boundary:l,rootBoundary:c,padding:d,altBoundary:h}),_=be(e.placement),b=Fe(e.placement),v=!b,y=Ie(_),w="x"===y?"y":"x",A=e.modifiersData.popperOffsets,E=e.rects.reference,T=e.rects.popper,C="function"==typeof m?m(Object.assign({},e.rects,{placement:e.placement})):m,O="number"==typeof C?{mainAxis:C,altAxis:C}:Object.assign({mainAxis:0,altAxis:0},C),x=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,k={x:0,y:0};if(A){if(o){var L,S="y"===y?zt:Vt,D="y"===y?Rt:qt,$="y"===y?"height":"width",I=A[y],N=I+g[S],P=I-g[D],M=f?-T[$]/2:0,j=b===Xt?E[$]:T[$],F=b===Xt?-T[$]:-E[$],H=e.elements.arrow,W=f&&H?Ce(H):{width:0,height:0},B=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},z=B[S],R=B[D],q=Ne(0,E[$],W[$]),V=v?E[$]/2-M-q-z-O.mainAxis:j-q-z-O.mainAxis,K=v?-E[$]/2+M+q+R+O.mainAxis:F+q+R+O.mainAxis,Q=e.elements.arrow&&$e(e.elements.arrow),X=Q?"y"===y?Q.clientTop||0:Q.clientLeft||0:0,Y=null!=(L=null==x?void 0:x[y])?L:0,U=I+K-Y,G=Ne(f?ye(N,I+V-Y-X):N,I,f?ve(P,U):P);A[y]=G,k[y]=G-I}if(a){var J,Z="x"===y?zt:Vt,tt="x"===y?Rt:qt,et=A[w],it="y"===w?"height":"width",nt=et+g[Z],st=et-g[tt],ot=-1!==[zt,Vt].indexOf(_),rt=null!=(J=null==x?void 0:x[w])?J:0,at=ot?nt:et-E[it]-T[it]-rt+O.altAxis,lt=ot?et+E[it]+T[it]-rt-O.altAxis:st,ct=f&&ot?function(t,e,i){var n=Ne(t,e,i);return n>i?i:n}(at,et,lt):Ne(f?at:nt,et,f?lt:st);A[w]=ct,k[w]=ct-et}e.modifiersData[n]=k}},requiresIfExists:["offset"]};function di(t,e,i){void 0===i&&(i=!1);var n,s,o=me(e),r=me(e)&&function(t){var e=t.getBoundingClientRect(),i=we(e.width)/t.offsetWidth||1,n=we(e.height)/t.offsetHeight||1;return 1!==i||1!==n}(e),a=Le(e),l=Te(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==ue(e)||Ue(a))&&(c=(n=e)!==fe(n)&&me(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:Xe(n)),me(e)?((h=Te(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=Ye(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function ui(t){var e=new Map,i=new Set,n=[];function 
s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var fi={placement:"bottom",modifiers:[],strategy:"absolute"};function pi(){for(var t=arguments.length,e=new Array(t),i=0;iNumber.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(F.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,...g(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=z.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>a(t)));i.length&&b(i,e,t===Ti,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=qi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=z.find(Ni);for(const i of e){const e=qi.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Ei,Ti].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Ii)?this:z.prev(this,Ii)[0]||z.next(this,Ii)[0]||z.findOne(Ii,t.delegateTarget.parentNode),o=qi.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}N.on(document,Si,Ii,qi.dataApiKeydownHandler),N.on(document,Si,Pi,qi.dataApiKeydownHandler),N.on(document,Li,qi.clearMenus),N.on(document,Di,qi.clearMenus),N.on(document,Li,Ii,(function(t){t.preventDefault(),qi.getOrCreateInstance(this).toggle()})),m(qi);const Vi="backdrop",Ki="show",Qi=`mousedown.bs.${Vi}`,Xi={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Yi={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Ui extends H{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Xi}static get DefaultType(){return Yi}static get NAME(){return Vi}show(t){if(!this._config.isVisible)return void g(t);this._append();const e=this._getElement();this._config.isAnimated&&d(e),e.classList.add(Ki),this._emulateAnimation((()=>{g(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Ki),this._emulateAnimation((()=>{this.dispose(),g(t)}))):g(t)}dispose(){this._isAppended&&(N.off(this._element,Qi),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return 
this._element}_configAfterMerge(t){return t.rootElement=r(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),N.on(t,Qi,(()=>{g(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){_(t,this._getElement(),this._config.isAnimated)}}const Gi=".bs.focustrap",Ji=`focusin${Gi}`,Zi=`keydown.tab${Gi}`,tn="backward",en={autofocus:!0,trapElement:null},nn={autofocus:"boolean",trapElement:"element"};class sn extends H{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return en}static get DefaultType(){return nn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),N.off(document,Gi),N.on(document,Ji,(t=>this._handleFocusin(t))),N.on(document,Zi,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,N.off(document,Gi))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=z.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===tn?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?tn:"forward")}}const on=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",rn=".sticky-top",an="padding-right",ln="margin-right";class cn{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,an,(e=>e+t)),this._setElementAttributes(on,an,(e=>e+t)),this._setElementAttributes(rn,ln,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,an),this._resetElementAttributes(on,an),this._resetElementAttributes(rn,ln)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&F.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=F.getDataAttribute(t,e);null!==i?(F.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(o(t))e(t);else for(const i of z.find(t,this._element))e(i)}}const hn=".bs.modal",dn=`hide${hn}`,un=`hidePrevented${hn}`,fn=`hidden${hn}`,pn=`show${hn}`,mn=`shown${hn}`,gn=`resize${hn}`,_n=`click.dismiss${hn}`,bn=`mousedown.dismiss${hn}`,vn=`keydown.dismiss${hn}`,yn=`click${hn}.data-api`,wn="modal-open",An="show",En="modal-static",Tn={backdrop:!0,focus:!0,keyboard:!0},Cn={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class On extends W{constructor(t,e){super(t,e),this._dialog=z.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new cn,this._addEventListeners()}static get Default(){return Tn}static get DefaultType(){return Cn}static get NAME(){return"modal"}toggle(t){return 
this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||N.trigger(this._element,pn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(wn),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(N.trigger(this._element,dn).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(An),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){N.off(window,hn),N.off(this._dialog,hn),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Ui({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new sn({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const e=z.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),d(this._element),this._element.classList.add(An),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,N.trigger(this._element,mn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){N.on(this._element,vn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),N.on(window,gn,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),N.on(this._element,bn,(t=>{N.one(this._element,_n,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(wn),this._resetAdjustments(),this._scrollBar.reset(),N.trigger(this._element,fn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(N.trigger(this._element,un).defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(En)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(En),this._queueCallback((()=>{this._element.classList.remove(En),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=p()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=p()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=On.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}N.on(document,yn,'[data-bs-toggle="modal"]',(function(t){const 
e=z.getElementFromSelector(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),N.one(e,pn,(t=>{t.defaultPrevented||N.one(e,fn,(()=>{a(this)&&this.focus()}))}));const i=z.findOne(".modal.show");i&&On.getInstance(i).hide(),On.getOrCreateInstance(e).toggle(this)})),R(On),m(On);const xn=".bs.offcanvas",kn=".data-api",Ln=`load${xn}${kn}`,Sn="show",Dn="showing",$n="hiding",In=".offcanvas.show",Nn=`show${xn}`,Pn=`shown${xn}`,Mn=`hide${xn}`,jn=`hidePrevented${xn}`,Fn=`hidden${xn}`,Hn=`resize${xn}`,Wn=`click${xn}${kn}`,Bn=`keydown.dismiss${xn}`,zn={backdrop:!0,keyboard:!0,scroll:!1},Rn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class qn extends W{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return zn}static get DefaultType(){return Rn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||N.trigger(this._element,Nn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new cn).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Dn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(Sn),this._element.classList.remove(Dn),N.trigger(this._element,Pn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(N.trigger(this._element,Mn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add($n),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(Sn,$n),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new cn).reset(),N.trigger(this._element,Fn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Ui({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():N.trigger(this._element,jn)}:null})}_initializeFocusTrap(){return new sn({trapElement:this._element})}_addEventListeners(){N.on(this._element,Bn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():N.trigger(this._element,jn))}))}static jQueryInterface(t){return this.each((function(){const e=qn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}N.on(document,Wn,'[data-bs-toggle="offcanvas"]',(function(t){const e=z.getElementFromSelector(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),l(this))return;N.one(e,Fn,(()=>{a(this)&&this.focus()}));const i=z.findOne(In);i&&i!==e&&qn.getInstance(i).hide(),qn.getOrCreateInstance(e).toggle(this)})),N.on(window,Ln,(()=>{for(const t of z.find(In))qn.getOrCreateInstance(t).show()})),N.on(window,Hn,(()=>{for(const t of z.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&qn.getOrCreateInstance(t).hide()})),R(qn),m(qn);const 
Vn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Kn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Qn=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,Xn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Kn.has(i)||Boolean(Qn.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Yn={allowList:Vn,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"
"},Un={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},Gn={entry:"(string|element|function|null)",selector:"(string|element)"};class Jn extends H{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Yn}static get DefaultType(){return Un}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},Gn)}_setContent(t,e,i){const n=z.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?o(e)?this._putElementInTemplate(r(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Xn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return g(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const Zn=new Set(["sanitize","allowList","sanitizeFn"]),ts="fade",es="show",is=".modal",ns="hide.bs.modal",ss="hover",os="focus",rs={AUTO:"auto",TOP:"top",RIGHT:p()?"left":"right",BOTTOM:"bottom",LEFT:p()?"right":"left"},as={allowList:Vn,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,6],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'',title:"",trigger:"hover focus"},ls={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class cs extends W{constructor(t,e){if(void 0===vi)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,e),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return as}static get DefaultType(){return ls}static get 
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),N.off(this._element.closest(is),ns,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=N.trigger(this._element,this.constructor.eventName("show")),e=(c(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),N.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(es),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))N.on(t,"mouseover",h);this._queueCallback((()=>{N.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!N.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(es),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))N.off(t,"mouseover",h);this._activeTrigger.click=!1,this._activeTrigger[os]=!1,this._activeTrigger[ss]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),N.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(ts,es),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(ts),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new Jn({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{".tooltip-inner":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(ts)}_isShown(){return this.tip&&this.tip.classList.contains(es)}_createPopper(t){const 
e=g(this._config.placement,[this,t,this._element]),i=rs[e.toUpperCase()];return bi(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return g(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,...g(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)N.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===ss?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===ss?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");N.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?os:ss]=!0,e._enter()})),N.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?os:ss]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},N.on(this._element.closest(is),ns,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=F.getDataAttributes(this._element);for(const t of Object.keys(e))Zn.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:r(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=cs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}m(cs);const 
hs={...cs.Default,content:"",offset:[0,8],placement:"right",template:'',trigger:"click"},ds={...cs.DefaultType,content:"(null|string|element|function)"};class us extends cs{static get Default(){return hs}static get DefaultType(){return ds}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{".popover-header":this._getTitle(),".popover-body":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=us.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}m(us);const fs=".bs.scrollspy",ps=`activate${fs}`,ms=`click${fs}`,gs=`load${fs}.data-api`,_s="active",bs="[href]",vs=".nav-link",ys=`${vs}, .nav-item > ${vs}, .list-group-item`,ws={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},As={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Es extends W{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return ws}static get DefaultType(){return As}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=r(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(N.off(this._config.target,ms),N.on(this._config.target,ms,bs,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=z.find(bs,this._config.target);for(const e of t){if(!e.hash||l(e))continue;const 
t=z.findOne(decodeURI(e.hash),this._element);a(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(_s),this._activateParents(t),N.trigger(this._element,ps,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))z.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(_s);else for(const e of z.parents(t,".nav, .list-group"))for(const t of z.prev(e,ys))t.classList.add(_s)}_clearActiveClass(t){t.classList.remove(_s);const e=z.find(`${bs}.${_s}`,t);for(const t of e)t.classList.remove(_s)}static jQueryInterface(t){return this.each((function(){const e=Es.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}N.on(window,gs,(()=>{for(const t of z.find('[data-bs-spy="scroll"]'))Es.getOrCreateInstance(t)})),m(Es);const Ts=".bs.tab",Cs=`hide${Ts}`,Os=`hidden${Ts}`,xs=`show${Ts}`,ks=`shown${Ts}`,Ls=`click${Ts}`,Ss=`keydown${Ts}`,Ds=`load${Ts}`,$s="ArrowLeft",Is="ArrowRight",Ns="ArrowUp",Ps="ArrowDown",Ms="Home",js="End",Fs="active",Hs="fade",Ws="show",Bs=":not(.dropdown-toggle)",zs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',Rs=`.nav-link${Bs}, .list-group-item${Bs}, [role="tab"]${Bs}, ${zs}`,qs=`.${Fs}[data-bs-toggle="tab"], .${Fs}[data-bs-toggle="pill"], .${Fs}[data-bs-toggle="list"]`;class Vs extends W{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),N.on(this._element,Ss,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?N.trigger(e,Cs,{relatedTarget:t}):null;N.trigger(t,xs,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(Fs),this._activate(z.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),N.trigger(t,ks,{relatedTarget:e})):t.classList.add(Ws)}),t,t.classList.contains(Hs)))}_deactivate(t,e){t&&(t.classList.remove(Fs),t.blur(),this._deactivate(z.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),N.trigger(t,Os,{relatedTarget:e})):t.classList.remove(Ws)}),t,t.classList.contains(Hs)))}_keydown(t){if(![$s,Is,Ns,Ps,Ms,js].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!l(t)));let i;if([Ms,js].includes(t.key))i=e[t.key===Ms?0:e.length-1];else{const n=[Is,Ps].includes(t.key);i=b(e,t.target,n,!0)}i&&(i.focus({preventScroll:!0}),Vs.getOrCreateInstance(i).show())}_getChildren(){return z.find(Rs,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const 
e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=z.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=z.findOne(t,i);s&&s.classList.toggle(n,e)};n(".dropdown-toggle",Fs),n(".dropdown-menu",Ws),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(Fs)}_getInnerElement(t){return t.matches(Rs)?t:z.findOne(Rs,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Vs.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}N.on(document,Ls,zs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),l(this)||Vs.getOrCreateInstance(this).show()})),N.on(window,Ds,(()=>{for(const t of z.find(qs))Vs.getOrCreateInstance(t)})),m(Vs);const Ks=".bs.toast",Qs=`mouseover${Ks}`,Xs=`mouseout${Ks}`,Ys=`focusin${Ks}`,Us=`focusout${Ks}`,Gs=`hide${Ks}`,Js=`hidden${Ks}`,Zs=`show${Ks}`,to=`shown${Ks}`,eo="hide",io="show",no="showing",so={animation:"boolean",autohide:"boolean",delay:"number"},oo={animation:!0,autohide:!0,delay:5e3};class ro extends W{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return oo}static get DefaultType(){return so}static get NAME(){return"toast"}show(){N.trigger(this._element,Zs).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(eo),d(this._element),this._element.classList.add(io,no),this._queueCallback((()=>{this._element.classList.remove(no),N.trigger(this._element,to),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(N.trigger(this._element,Gs).defaultPrevented||(this._element.classList.add(no),this._queueCallback((()=>{this._element.classList.add(eo),this._element.classList.remove(no,io),N.trigger(this._element,Js)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(io),super.dispose()}isShown(){return this._element.classList.contains(io)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){N.on(this._element,Qs,(t=>this._onInteraction(t,!0))),N.on(this._element,Xs,(t=>this._onInteraction(t,!1))),N.on(this._element,Ys,(t=>this._onInteraction(t,!0))),N.on(this._element,Us,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return 
this.each((function(){const e=ro.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}return R(ro),m(ro),{Alert:Q,Button:Y,Carousel:xt,Collapse:Bt,Dropdown:qi,Modal:On,Offcanvas:qn,Popover:us,ScrollSpy:Es,Tab:Vs,Toast:ro,Tooltip:cs}})); +//# sourceMappingURL=bootstrap.bundle.min.js.map \ No newline at end of file diff --git a/site_libs/clipboard/clipboard.min.js b/site_libs/clipboard/clipboard.min.js new file mode 100644 index 000000000..1103f811e --- /dev/null +++ b/site_libs/clipboard/clipboard.min.js @@ -0,0 +1,7 @@ +/*! + * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return b}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),r=n.n(e);function c(t){try{return document.execCommand(t)}catch(t){return}}var a=function(t){t=r()(t);return c("cut"),t};function o(t,e){var n,o,t=(n=t,o="rtl"===document.documentElement.getAttribute("dir"),(t=document.createElement("textarea")).style.fontSize="12pt",t.style.border="0",t.style.padding="0",t.style.margin="0",t.style.position="absolute",t.style[o?"right":"left"]="-9999px",o=window.pageYOffset||document.documentElement.scrollTop,t.style.top="".concat(o,"px"),t.setAttribute("readonly",""),t.value=n,t);return e.container.appendChild(t),e=r()(t),c("copy"),t.remove(),e}var f=function(t){var e=1.anchorjs-link,.anchorjs-link:focus{opacity:1}",A.sheet.cssRules.length),A.sheet.insertRule("[data-anchorjs-icon]::after{content:attr(data-anchorjs-icon)}",A.sheet.cssRules.length),A.sheet.insertRule('@font-face{font-family:anchorjs-icons;src:url(data:n/a;base64,AAEAAAALAIAAAwAwT1MvMg8yG2cAAAE4AAAAYGNtYXDp3gC3AAABpAAAAExnYXNwAAAAEAAAA9wAAAAIZ2x5ZlQCcfwAAAH4AAABCGhlYWQHFvHyAAAAvAAAADZoaGVhBnACFwAAAPQAAAAkaG10eASAADEAAAGYAAAADGxvY2EACACEAAAB8AAAAAhtYXhwAAYAVwAAARgAAAAgbmFtZQGOH9cAAAMAAAAAunBvc3QAAwAAAAADvAAAACAAAQAAAAEAAHzE2p9fDzz1AAkEAAAAAADRecUWAAAAANQA6R8AAAAAAoACwAAAAAgAAgAAAAAAAAABAAADwP/AAAACgAAA/9MCrQABAAAAAAAAAAAAAAAAAAAAAwABAAAAAwBVAAIAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAMCQAGQAAUAAAKZAswAAACPApkCzAAAAesAMwEJAAAAAAAAAAAAAAAAAAAAARAAAAAAAAAAAAAAAAAAAAAAQAAg//0DwP/AAEADwABAAAAAAQAAAAAAAAAAAAAAIAAAAAAAAAIAAAACgAAxAAAAAwAAAAMAAAAcAAEAAwAAABwAAwABAAAAHAAEADAAAAAIAAgAAgAAACDpy//9//8AAAAg6cv//f///+EWNwADAAEAAAAAAAAAAAAAAAAACACEAAEAAAAAAAAAAAAAAAAxAAACAAQARAKAAsAAKwBUAAABIiYnJjQ3NzY2MzIWFxYUBwcGIicmNDc3NjQnJiYjIgYHBwYUFxYUBwYGIwciJicmNDc3NjIXFhQHBwYUFxYWMzI2Nzc2NCcmNDc2MhcWFAcHBgYjARQGDAUtLXoWOR8fORYtLTgKGwoKCjgaGg0gEhIgDXoaGgkJBQwHdR85Fi0tOAobCgoKOBoaDSASEiANehoaCQkKGwotLXoWOR8BMwUFLYEuehYXFxYugC44CQkKGwo4GkoaDQ0NDXoaShoKGwoFBe8XFi6ALjgJCQobCjgaShoNDQ0NehpKGgobCgoKLYEuehYXAAAADACWAAEAAAAAAAEACAAAAAEAAAAAAAIAAwAIAAEAAAAAAAMACAAAAAEAAAAAAAQACAAAAAEAAAAAAAUAAQALAAEAAAAAAAYACAAAAAMAAQQJAAEAEAAMAAMAAQQJAAIABgAcAAMAAQQJAAMAEAAMAAMAAQQJAAQAEAAMAAMAAQQJAAUAAgAiAAMAAQQJAAYAEAAMYW5jaG9yanM0MDBAAGEAbgBjAGgAbwByAGoAcwA0ADAAMABAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAH//wAP) format("truetype")}',A.sheet.cssRules.length)),h=document.querySelectorAll("[id]"),t=[].map.call(h,function(A){return A.id}),i=0;i\]./()*\\\n\t\b\v\u00A0]/g,"-").replace(/-{2,}/g,"-").substring(0,this.options.truncate).replace(/^-+|-+$/gm,"").toLowerCase()},this.hasAnchorJSLink=function(A){var 
e=A.firstChild&&-1<(" "+A.firstChild.className+" ").indexOf(" anchorjs-link "),A=A.lastChild&&-1<(" "+A.lastChild.className+" ").indexOf(" anchorjs-link ");return e||A||!1}}}); +// @license-end \ No newline at end of file diff --git a/site_libs/quarto-html/popper.min.js b/site_libs/quarto-html/popper.min.js new file mode 100644 index 000000000..e3726d728 --- /dev/null +++ b/site_libs/quarto-html/popper.min.js @@ -0,0 +1,6 @@ +/** + * @popperjs/core v2.11.7 - MIT License + */ + +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).Popper={})}(this,(function(e){"use strict";function t(e){if(null==e)return window;if("[object Window]"!==e.toString()){var t=e.ownerDocument;return t&&t.defaultView||window}return e}function n(e){return e instanceof t(e).Element||e instanceof Element}function r(e){return e instanceof t(e).HTMLElement||e instanceof HTMLElement}function o(e){return"undefined"!=typeof ShadowRoot&&(e instanceof t(e).ShadowRoot||e instanceof ShadowRoot)}var i=Math.max,a=Math.min,s=Math.round;function f(){var e=navigator.userAgentData;return null!=e&&e.brands&&Array.isArray(e.brands)?e.brands.map((function(e){return e.brand+"/"+e.version})).join(" "):navigator.userAgent}function c(){return!/^((?!chrome|android).)*safari/i.test(f())}function p(e,o,i){void 0===o&&(o=!1),void 0===i&&(i=!1);var a=e.getBoundingClientRect(),f=1,p=1;o&&r(e)&&(f=e.offsetWidth>0&&s(a.width)/e.offsetWidth||1,p=e.offsetHeight>0&&s(a.height)/e.offsetHeight||1);var u=(n(e)?t(e):window).visualViewport,l=!c()&&i,d=(a.left+(l&&u?u.offsetLeft:0))/f,h=(a.top+(l&&u?u.offsetTop:0))/p,m=a.width/f,v=a.height/p;return{width:m,height:v,top:h,right:d+m,bottom:h+v,left:d,x:d,y:h}}function u(e){var n=t(e);return{scrollLeft:n.pageXOffset,scrollTop:n.pageYOffset}}function l(e){return e?(e.nodeName||"").toLowerCase():null}function d(e){return((n(e)?e.ownerDocument:e.document)||window.document).documentElement}function h(e){return p(d(e)).left+u(e).scrollLeft}function m(e){return t(e).getComputedStyle(e)}function v(e){var t=m(e),n=t.overflow,r=t.overflowX,o=t.overflowY;return/auto|scroll|overlay|hidden/.test(n+o+r)}function y(e,n,o){void 0===o&&(o=!1);var i,a,f=r(n),c=r(n)&&function(e){var t=e.getBoundingClientRect(),n=s(t.width)/e.offsetWidth||1,r=s(t.height)/e.offsetHeight||1;return 1!==n||1!==r}(n),m=d(n),y=p(e,c,o),g={scrollLeft:0,scrollTop:0},b={x:0,y:0};return(f||!f&&!o)&&(("body"!==l(n)||v(m))&&(g=(i=n)!==t(i)&&r(i)?{scrollLeft:(a=i).scrollLeft,scrollTop:a.scrollTop}:u(i)),r(n)?((b=p(n,!0)).x+=n.clientLeft,b.y+=n.clientTop):m&&(b.x=h(m))),{x:y.left+g.scrollLeft-b.x,y:y.top+g.scrollTop-b.y,width:y.width,height:y.height}}function g(e){var t=p(e),n=e.offsetWidth,r=e.offsetHeight;return Math.abs(t.width-n)<=1&&(n=t.width),Math.abs(t.height-r)<=1&&(r=t.height),{x:e.offsetLeft,y:e.offsetTop,width:n,height:r}}function b(e){return"html"===l(e)?e:e.assignedSlot||e.parentNode||(o(e)?e.host:null)||d(e)}function x(e){return["html","body","#document"].indexOf(l(e))>=0?e.ownerDocument.body:r(e)&&v(e)?e:x(b(e))}function w(e,n){var r;void 0===n&&(n=[]);var o=x(e),i=o===(null==(r=e.ownerDocument)?void 0:r.body),a=t(o),s=i?[a].concat(a.visualViewport||[],v(o)?o:[]):o,f=n.concat(s);return i?f:f.concat(w(b(s)))}function O(e){return["table","td","th"].indexOf(l(e))>=0}function j(e){return r(e)&&"fixed"!==m(e).position?e.offsetParent:null}function E(e){for(var 
n=t(e),i=j(e);i&&O(i)&&"static"===m(i).position;)i=j(i);return i&&("html"===l(i)||"body"===l(i)&&"static"===m(i).position)?n:i||function(e){var t=/firefox/i.test(f());if(/Trident/i.test(f())&&r(e)&&"fixed"===m(e).position)return null;var n=b(e);for(o(n)&&(n=n.host);r(n)&&["html","body"].indexOf(l(n))<0;){var i=m(n);if("none"!==i.transform||"none"!==i.perspective||"paint"===i.contain||-1!==["transform","perspective"].indexOf(i.willChange)||t&&"filter"===i.willChange||t&&i.filter&&"none"!==i.filter)return n;n=n.parentNode}return null}(e)||n}var D="top",A="bottom",L="right",P="left",M="auto",k=[D,A,L,P],W="start",B="end",H="viewport",T="popper",R=k.reduce((function(e,t){return e.concat([t+"-"+W,t+"-"+B])}),[]),S=[].concat(k,[M]).reduce((function(e,t){return e.concat([t,t+"-"+W,t+"-"+B])}),[]),V=["beforeRead","read","afterRead","beforeMain","main","afterMain","beforeWrite","write","afterWrite"];function q(e){var t=new Map,n=new Set,r=[];function o(e){n.add(e.name),[].concat(e.requires||[],e.requiresIfExists||[]).forEach((function(e){if(!n.has(e)){var r=t.get(e);r&&o(r)}})),r.push(e)}return e.forEach((function(e){t.set(e.name,e)})),e.forEach((function(e){n.has(e.name)||o(e)})),r}function C(e){return e.split("-")[0]}function N(e,t){var n=t.getRootNode&&t.getRootNode();if(e.contains(t))return!0;if(n&&o(n)){var r=t;do{if(r&&e.isSameNode(r))return!0;r=r.parentNode||r.host}while(r)}return!1}function I(e){return Object.assign({},e,{left:e.x,top:e.y,right:e.x+e.width,bottom:e.y+e.height})}function _(e,r,o){return r===H?I(function(e,n){var r=t(e),o=d(e),i=r.visualViewport,a=o.clientWidth,s=o.clientHeight,f=0,p=0;if(i){a=i.width,s=i.height;var u=c();(u||!u&&"fixed"===n)&&(f=i.offsetLeft,p=i.offsetTop)}return{width:a,height:s,x:f+h(e),y:p}}(e,o)):n(r)?function(e,t){var n=p(e,!1,"fixed"===t);return n.top=n.top+e.clientTop,n.left=n.left+e.clientLeft,n.bottom=n.top+e.clientHeight,n.right=n.left+e.clientWidth,n.width=e.clientWidth,n.height=e.clientHeight,n.x=n.left,n.y=n.top,n}(r,o):I(function(e){var t,n=d(e),r=u(e),o=null==(t=e.ownerDocument)?void 0:t.body,a=i(n.scrollWidth,n.clientWidth,o?o.scrollWidth:0,o?o.clientWidth:0),s=i(n.scrollHeight,n.clientHeight,o?o.scrollHeight:0,o?o.clientHeight:0),f=-r.scrollLeft+h(e),c=-r.scrollTop;return"rtl"===m(o||n).direction&&(f+=i(n.clientWidth,o?o.clientWidth:0)-a),{width:a,height:s,x:f,y:c}}(d(e)))}function F(e,t,o,s){var f="clippingParents"===t?function(e){var t=w(b(e)),o=["absolute","fixed"].indexOf(m(e).position)>=0&&r(e)?E(e):e;return n(o)?t.filter((function(e){return n(e)&&N(e,o)&&"body"!==l(e)})):[]}(e):[].concat(t),c=[].concat(f,[o]),p=c[0],u=c.reduce((function(t,n){var r=_(e,n,s);return t.top=i(r.top,t.top),t.right=a(r.right,t.right),t.bottom=a(r.bottom,t.bottom),t.left=i(r.left,t.left),t}),_(e,p,s));return u.width=u.right-u.left,u.height=u.bottom-u.top,u.x=u.left,u.y=u.top,u}function U(e){return e.split("-")[1]}function z(e){return["top","bottom"].indexOf(e)>=0?"x":"y"}function X(e){var t,n=e.reference,r=e.element,o=e.placement,i=o?C(o):null,a=o?U(o):null,s=n.x+n.width/2-r.width/2,f=n.y+n.height/2-r.height/2;switch(i){case D:t={x:s,y:n.y-r.height};break;case A:t={x:s,y:n.y+n.height};break;case L:t={x:n.x+n.width,y:f};break;case P:t={x:n.x-r.width,y:f};break;default:t={x:n.x,y:n.y}}var c=i?z(i):null;if(null!=c){var p="y"===c?"height":"width";switch(a){case W:t[c]=t[c]-(n[p]/2-r[p]/2);break;case B:t[c]=t[c]+(n[p]/2-r[p]/2)}}return t}function Y(e){return Object.assign({},{top:0,right:0,bottom:0,left:0},e)}function G(e,t){return t.reduce((function(t,n){return 
t[n]=e,t}),{})}function J(e,t){void 0===t&&(t={});var r=t,o=r.placement,i=void 0===o?e.placement:o,a=r.strategy,s=void 0===a?e.strategy:a,f=r.boundary,c=void 0===f?"clippingParents":f,u=r.rootBoundary,l=void 0===u?H:u,h=r.elementContext,m=void 0===h?T:h,v=r.altBoundary,y=void 0!==v&&v,g=r.padding,b=void 0===g?0:g,x=Y("number"!=typeof b?b:G(b,k)),w=m===T?"reference":T,O=e.rects.popper,j=e.elements[y?w:m],E=F(n(j)?j:j.contextElement||d(e.elements.popper),c,l,s),P=p(e.elements.reference),M=X({reference:P,element:O,strategy:"absolute",placement:i}),W=I(Object.assign({},O,M)),B=m===T?W:P,R={top:E.top-B.top+x.top,bottom:B.bottom-E.bottom+x.bottom,left:E.left-B.left+x.left,right:B.right-E.right+x.right},S=e.modifiersData.offset;if(m===T&&S){var V=S[i];Object.keys(R).forEach((function(e){var t=[L,A].indexOf(e)>=0?1:-1,n=[D,A].indexOf(e)>=0?"y":"x";R[e]+=V[n]*t}))}return R}var K={placement:"bottom",modifiers:[],strategy:"absolute"};function Q(){for(var e=arguments.length,t=new Array(e),n=0;n=0?-1:1,i="function"==typeof n?n(Object.assign({},t,{placement:e})):n,a=i[0],s=i[1];return a=a||0,s=(s||0)*o,[P,L].indexOf(r)>=0?{x:s,y:a}:{x:a,y:s}}(n,t.rects,i),e}),{}),s=a[t.placement],f=s.x,c=s.y;null!=t.modifiersData.popperOffsets&&(t.modifiersData.popperOffsets.x+=f,t.modifiersData.popperOffsets.y+=c),t.modifiersData[r]=a}},se={left:"right",right:"left",bottom:"top",top:"bottom"};function fe(e){return e.replace(/left|right|bottom|top/g,(function(e){return se[e]}))}var ce={start:"end",end:"start"};function pe(e){return e.replace(/start|end/g,(function(e){return ce[e]}))}function ue(e,t){void 0===t&&(t={});var n=t,r=n.placement,o=n.boundary,i=n.rootBoundary,a=n.padding,s=n.flipVariations,f=n.allowedAutoPlacements,c=void 0===f?S:f,p=U(r),u=p?s?R:R.filter((function(e){return U(e)===p})):k,l=u.filter((function(e){return c.indexOf(e)>=0}));0===l.length&&(l=u);var d=l.reduce((function(t,n){return t[n]=J(e,{placement:n,boundary:o,rootBoundary:i,padding:a})[C(n)],t}),{});return Object.keys(d).sort((function(e,t){return d[e]-d[t]}))}var le={name:"flip",enabled:!0,phase:"main",fn:function(e){var t=e.state,n=e.options,r=e.name;if(!t.modifiersData[r]._skip){for(var o=n.mainAxis,i=void 0===o||o,a=n.altAxis,s=void 0===a||a,f=n.fallbackPlacements,c=n.padding,p=n.boundary,u=n.rootBoundary,l=n.altBoundary,d=n.flipVariations,h=void 0===d||d,m=n.allowedAutoPlacements,v=t.options.placement,y=C(v),g=f||(y===v||!h?[fe(v)]:function(e){if(C(e)===M)return[];var t=fe(e);return[pe(e),t,pe(t)]}(v)),b=[v].concat(g).reduce((function(e,n){return e.concat(C(n)===M?ue(t,{placement:n,boundary:p,rootBoundary:u,padding:c,flipVariations:h,allowedAutoPlacements:m}):n)}),[]),x=t.rects.reference,w=t.rects.popper,O=new Map,j=!0,E=b[0],k=0;k=0,S=R?"width":"height",V=J(t,{placement:B,boundary:p,rootBoundary:u,altBoundary:l,padding:c}),q=R?T?L:P:T?A:D;x[S]>w[S]&&(q=fe(q));var N=fe(q),I=[];if(i&&I.push(V[H]<=0),s&&I.push(V[q]<=0,V[N]<=0),I.every((function(e){return e}))){E=B,j=!1;break}O.set(B,I)}if(j)for(var _=function(e){var t=b.find((function(t){var n=O.get(t);if(n)return n.slice(0,e).every((function(e){return e}))}));if(t)return E=t,"break"},F=h?3:1;F>0;F--){if("break"===_(F))break}t.placement!==E&&(t.modifiersData[r]._skip=!0,t.placement=E,t.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function de(e,t,n){return i(e,a(t,n))}var he={name:"preventOverflow",enabled:!0,phase:"main",fn:function(e){var t=e.state,n=e.options,r=e.name,o=n.mainAxis,s=void 0===o||o,f=n.altAxis,c=void 
0!==f&&f,p=n.boundary,u=n.rootBoundary,l=n.altBoundary,d=n.padding,h=n.tether,m=void 0===h||h,v=n.tetherOffset,y=void 0===v?0:v,b=J(t,{boundary:p,rootBoundary:u,padding:d,altBoundary:l}),x=C(t.placement),w=U(t.placement),O=!w,j=z(x),M="x"===j?"y":"x",k=t.modifiersData.popperOffsets,B=t.rects.reference,H=t.rects.popper,T="function"==typeof y?y(Object.assign({},t.rects,{placement:t.placement})):y,R="number"==typeof T?{mainAxis:T,altAxis:T}:Object.assign({mainAxis:0,altAxis:0},T),S=t.modifiersData.offset?t.modifiersData.offset[t.placement]:null,V={x:0,y:0};if(k){if(s){var q,N="y"===j?D:P,I="y"===j?A:L,_="y"===j?"height":"width",F=k[j],X=F+b[N],Y=F-b[I],G=m?-H[_]/2:0,K=w===W?B[_]:H[_],Q=w===W?-H[_]:-B[_],Z=t.elements.arrow,$=m&&Z?g(Z):{width:0,height:0},ee=t.modifiersData["arrow#persistent"]?t.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},te=ee[N],ne=ee[I],re=de(0,B[_],$[_]),oe=O?B[_]/2-G-re-te-R.mainAxis:K-re-te-R.mainAxis,ie=O?-B[_]/2+G+re+ne+R.mainAxis:Q+re+ne+R.mainAxis,ae=t.elements.arrow&&E(t.elements.arrow),se=ae?"y"===j?ae.clientTop||0:ae.clientLeft||0:0,fe=null!=(q=null==S?void 0:S[j])?q:0,ce=F+ie-fe,pe=de(m?a(X,F+oe-fe-se):X,F,m?i(Y,ce):Y);k[j]=pe,V[j]=pe-F}if(c){var ue,le="x"===j?D:P,he="x"===j?A:L,me=k[M],ve="y"===M?"height":"width",ye=me+b[le],ge=me-b[he],be=-1!==[D,P].indexOf(x),xe=null!=(ue=null==S?void 0:S[M])?ue:0,we=be?ye:me-B[ve]-H[ve]-xe+R.altAxis,Oe=be?me+B[ve]+H[ve]-xe-R.altAxis:ge,je=m&&be?function(e,t,n){var r=de(e,t,n);return r>n?n:r}(we,me,Oe):de(m?we:ye,me,m?Oe:ge);k[M]=je,V[M]=je-me}t.modifiersData[r]=V}},requiresIfExists:["offset"]};var me={name:"arrow",enabled:!0,phase:"main",fn:function(e){var t,n=e.state,r=e.name,o=e.options,i=n.elements.arrow,a=n.modifiersData.popperOffsets,s=C(n.placement),f=z(s),c=[P,L].indexOf(s)>=0?"height":"width";if(i&&a){var p=function(e,t){return Y("number"!=typeof(e="function"==typeof e?e(Object.assign({},t.rects,{placement:t.placement})):e)?e:G(e,k))}(o.padding,n),u=g(i),l="y"===f?D:P,d="y"===f?A:L,h=n.rects.reference[c]+n.rects.reference[f]-a[f]-n.rects.popper[c],m=a[f]-n.rects.reference[f],v=E(i),y=v?"y"===f?v.clientHeight||0:v.clientWidth||0:0,b=h/2-m/2,x=p[l],w=y-u[c]-p[d],O=y/2-u[c]/2+b,j=de(x,O,w),M=f;n.modifiersData[r]=((t={})[M]=j,t.centerOffset=j-O,t)}},effect:function(e){var t=e.state,n=e.options.element,r=void 0===n?"[data-popper-arrow]":n;null!=r&&("string"!=typeof r||(r=t.elements.popper.querySelector(r)))&&N(t.elements.popper,r)&&(t.elements.arrow=r)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function ve(e,t,n){return void 0===n&&(n={x:0,y:0}),{top:e.top-t.height-n.y,right:e.right-t.width+n.x,bottom:e.bottom-t.height+n.y,left:e.left-t.width-n.x}}function ye(e){return[D,L,A,P].some((function(t){return e[t]>=0}))}var ge={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(e){var 
t=e.state,n=e.name,r=t.rects.reference,o=t.rects.popper,i=t.modifiersData.preventOverflow,a=J(t,{elementContext:"reference"}),s=J(t,{altBoundary:!0}),f=ve(a,r),c=ve(s,o,i),p=ye(f),u=ye(c);t.modifiersData[n]={referenceClippingOffsets:f,popperEscapeOffsets:c,isReferenceHidden:p,hasPopperEscaped:u},t.attributes.popper=Object.assign({},t.attributes.popper,{"data-popper-reference-hidden":p,"data-popper-escaped":u})}},be=Z({defaultModifiers:[ee,te,oe,ie]}),xe=[ee,te,oe,ie,ae,le,he,me,ge],we=Z({defaultModifiers:xe});e.applyStyles=ie,e.arrow=me,e.computeStyles=oe,e.createPopper=we,e.createPopperLite=be,e.defaultModifiers=xe,e.detectOverflow=J,e.eventListeners=ee,e.flip=le,e.hide=ge,e.offset=ae,e.popperGenerator=Z,e.popperOffsets=te,e.preventOverflow=he,Object.defineProperty(e,"__esModule",{value:!0})})); + diff --git a/site_libs/quarto-html/quarto-syntax-highlighting.css b/site_libs/quarto-html/quarto-syntax-highlighting.css new file mode 100644 index 000000000..d9fd98f04 --- /dev/null +++ b/site_libs/quarto-html/quarto-syntax-highlighting.css @@ -0,0 +1,203 @@ +/* quarto syntax highlight colors */ +:root { + --quarto-hl-ot-color: #003B4F; + --quarto-hl-at-color: #657422; + --quarto-hl-ss-color: #20794D; + --quarto-hl-an-color: #5E5E5E; + --quarto-hl-fu-color: #4758AB; + --quarto-hl-st-color: #20794D; + --quarto-hl-cf-color: #003B4F; + --quarto-hl-op-color: #5E5E5E; + --quarto-hl-er-color: #AD0000; + --quarto-hl-bn-color: #AD0000; + --quarto-hl-al-color: #AD0000; + --quarto-hl-va-color: #111111; + --quarto-hl-bu-color: inherit; + --quarto-hl-ex-color: inherit; + --quarto-hl-pp-color: #AD0000; + --quarto-hl-in-color: #5E5E5E; + --quarto-hl-vs-color: #20794D; + --quarto-hl-wa-color: #5E5E5E; + --quarto-hl-do-color: #5E5E5E; + --quarto-hl-im-color: #00769E; + --quarto-hl-ch-color: #20794D; + --quarto-hl-dt-color: #AD0000; + --quarto-hl-fl-color: #AD0000; + --quarto-hl-co-color: #5E5E5E; + --quarto-hl-cv-color: #5E5E5E; + --quarto-hl-cn-color: #8f5902; + --quarto-hl-sc-color: #5E5E5E; + --quarto-hl-dv-color: #AD0000; + --quarto-hl-kw-color: #003B4F; +} + +/* other quarto variables */ +:root { + --quarto-font-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; +} + +pre > code.sourceCode > span { + color: #003B4F; +} + +code span { + color: #003B4F; +} + +code.sourceCode > span { + color: #003B4F; +} + +div.sourceCode, +div.sourceCode pre.sourceCode { + color: #003B4F; +} + +code span.ot { + color: #003B4F; + font-style: inherit; +} + +code span.at { + color: #657422; + font-style: inherit; +} + +code span.ss { + color: #20794D; + font-style: inherit; +} + +code span.an { + color: #5E5E5E; + font-style: inherit; +} + +code span.fu { + color: #4758AB; + font-style: inherit; +} + +code span.st { + color: #20794D; + font-style: inherit; +} + +code span.cf { + color: #003B4F; + font-style: inherit; +} + +code span.op { + color: #5E5E5E; + font-style: inherit; +} + +code span.er { + color: #AD0000; + font-style: inherit; +} + +code span.bn { + color: #AD0000; + font-style: inherit; +} + +code span.al { + color: #AD0000; + font-style: inherit; +} + +code span.va { + color: #111111; + font-style: inherit; +} + +code span.bu { + font-style: inherit; +} + +code span.ex { + font-style: inherit; +} + +code span.pp { + color: #AD0000; + font-style: inherit; +} + +code span.in { + color: #5E5E5E; + font-style: inherit; +} + +code span.vs { + color: #20794D; + font-style: inherit; +} + +code span.wa { + color: #5E5E5E; + font-style: italic; +} + +code span.do { + color: 
#5E5E5E; + font-style: italic; +} + +code span.im { + color: #00769E; + font-style: inherit; +} + +code span.ch { + color: #20794D; + font-style: inherit; +} + +code span.dt { + color: #AD0000; + font-style: inherit; +} + +code span.fl { + color: #AD0000; + font-style: inherit; +} + +code span.co { + color: #5E5E5E; + font-style: inherit; +} + +code span.cv { + color: #5E5E5E; + font-style: italic; +} + +code span.cn { + color: #8f5902; + font-style: inherit; +} + +code span.sc { + color: #5E5E5E; + font-style: inherit; +} + +code span.dv { + color: #AD0000; + font-style: inherit; +} + +code span.kw { + color: #003B4F; + font-style: inherit; +} + +.prevent-inlining { + content: " { + // Find any conflicting margin elements and add margins to the + // top to prevent overlap + const marginChildren = window.document.querySelectorAll( + ".column-margin.column-container > *, .margin-caption, .aside" + ); + + let lastBottom = 0; + for (const marginChild of marginChildren) { + if (marginChild.offsetParent !== null) { + // clear the top margin so we recompute it + marginChild.style.marginTop = null; + const top = marginChild.getBoundingClientRect().top + window.scrollY; + if (top < lastBottom) { + const marginChildStyle = window.getComputedStyle(marginChild); + const marginBottom = parseFloat(marginChildStyle["marginBottom"]); + const margin = lastBottom - top + marginBottom; + marginChild.style.marginTop = `${margin}px`; + } + const styles = window.getComputedStyle(marginChild); + const marginTop = parseFloat(styles["marginTop"]); + lastBottom = top + marginChild.getBoundingClientRect().height + marginTop; + } + } +}; + +window.document.addEventListener("DOMContentLoaded", function (_event) { + // Recompute the position of margin elements anytime the body size changes + if (window.ResizeObserver) { + const resizeObserver = new window.ResizeObserver( + throttle(() => { + layoutMarginEls(); + if ( + window.document.body.getBoundingClientRect().width < 990 && + isReaderMode() + ) { + quartoToggleReader(); + } + }, 50) + ); + resizeObserver.observe(window.document.body); + } + + const tocEl = window.document.querySelector('nav.toc-active[role="doc-toc"]'); + const sidebarEl = window.document.getElementById("quarto-sidebar"); + const leftTocEl = window.document.getElementById("quarto-sidebar-toc-left"); + const marginSidebarEl = window.document.getElementById( + "quarto-margin-sidebar" + ); + // function to determine whether the element has a previous sibling that is active + const prevSiblingIsActiveLink = (el) => { + const sibling = el.previousElementSibling; + if (sibling && sibling.tagName === "A") { + return sibling.classList.contains("active"); + } else { + return false; + } + }; + + // fire slideEnter for bootstrap tab activations (for htmlwidget resize behavior) + function fireSlideEnter(e) { + const event = window.document.createEvent("Event"); + event.initEvent("slideenter", true, true); + window.document.dispatchEvent(event); + } + const tabs = window.document.querySelectorAll('a[data-bs-toggle="tab"]'); + tabs.forEach((tab) => { + tab.addEventListener("shown.bs.tab", fireSlideEnter); + }); + + // fire slideEnter for tabby tab activations (for htmlwidget resize behavior) + document.addEventListener("tabby", fireSlideEnter, false); + + // Track scrolling and mark TOC links as active + // get table of contents and sidebar (bail if we don't have at least one) + const tocLinks = tocEl + ? 
[...tocEl.querySelectorAll("a[data-scroll-target]")] + : []; + const makeActive = (link) => tocLinks[link].classList.add("active"); + const removeActive = (link) => tocLinks[link].classList.remove("active"); + const removeAllActive = () => + [...Array(tocLinks.length).keys()].forEach((link) => removeActive(link)); + + // activate the anchor for a section associated with this TOC entry + tocLinks.forEach((link) => { + link.addEventListener("click", () => { + if (link.href.indexOf("#") !== -1) { + const anchor = link.href.split("#")[1]; + const heading = window.document.querySelector( + `[data-anchor-id=${anchor}]` + ); + if (heading) { + // Add the class + heading.classList.add("reveal-anchorjs-link"); + + // function to show the anchor + const handleMouseout = () => { + heading.classList.remove("reveal-anchorjs-link"); + heading.removeEventListener("mouseout", handleMouseout); + }; + + // add a function to clear the anchor when the user mouses out of it + heading.addEventListener("mouseout", handleMouseout); + } + } + }); + }); + + const sections = tocLinks.map((link) => { + const target = link.getAttribute("data-scroll-target"); + if (target.startsWith("#")) { + return window.document.getElementById(decodeURI(`${target.slice(1)}`)); + } else { + return window.document.querySelector(decodeURI(`${target}`)); + } + }); + + const sectionMargin = 200; + let currentActive = 0; + // track whether we've initialized state the first time + let init = false; + + const updateActiveLink = () => { + // The index from bottom to top (e.g. reversed list) + let sectionIndex = -1; + if ( + window.innerHeight + window.pageYOffset >= + window.document.body.offsetHeight + ) { + sectionIndex = 0; + } else { + sectionIndex = [...sections].reverse().findIndex((section) => { + if (section) { + return window.pageYOffset >= section.offsetTop - sectionMargin; + } else { + return false; + } + }); + } + if (sectionIndex > -1) { + const current = sections.length - sectionIndex - 1; + if (current !== currentActive) { + removeAllActive(); + currentActive = current; + makeActive(current); + if (init) { + window.dispatchEvent(sectionChanged); + } + init = true; + } + } + }; + + const inHiddenRegion = (top, bottom, hiddenRegions) => { + for (const region of hiddenRegions) { + if (top <= region.bottom && bottom >= region.top) { + return true; + } + } + return false; + }; + + const categorySelector = "header.quarto-title-block .quarto-category"; + const activateCategories = (href) => { + // Find any categories + // Surround them with a link pointing back to: + // #category=Authoring + try { + const categoryEls = window.document.querySelectorAll(categorySelector); + for (const categoryEl of categoryEls) { + const categoryText = categoryEl.textContent; + if (categoryText) { + const link = `${href}#category=${encodeURIComponent(categoryText)}`; + const linkEl = window.document.createElement("a"); + linkEl.setAttribute("href", link); + for (const child of categoryEl.childNodes) { + linkEl.append(child); + } + categoryEl.appendChild(linkEl); + } + } + } catch { + // Ignore errors + } + }; + function hasTitleCategories() { + return window.document.querySelector(categorySelector) !== null; + } + + function offsetRelativeUrl(url) { + const offset = getMeta("quarto:offset"); + return offset ? 
offset + url : url; + } + + function offsetAbsoluteUrl(url) { + const offset = getMeta("quarto:offset"); + const baseUrl = new URL(offset, window.location); + + const projRelativeUrl = url.replace(baseUrl, ""); + if (projRelativeUrl.startsWith("/")) { + return projRelativeUrl; + } else { + return "/" + projRelativeUrl; + } + } + + // read a meta tag value + function getMeta(metaName) { + const metas = window.document.getElementsByTagName("meta"); + for (let i = 0; i < metas.length; i++) { + if (metas[i].getAttribute("name") === metaName) { + return metas[i].getAttribute("content"); + } + } + return ""; + } + + async function findAndActivateCategories() { + const currentPagePath = offsetAbsoluteUrl(window.location.href); + const response = await fetch(offsetRelativeUrl("listings.json")); + if (response.status == 200) { + return response.json().then(function (listingPaths) { + const listingHrefs = []; + for (const listingPath of listingPaths) { + const pathWithoutLeadingSlash = listingPath.listing.substring(1); + for (const item of listingPath.items) { + if ( + item === currentPagePath || + item === currentPagePath + "index.html" + ) { + // Resolve this path against the offset to be sure + // we already are using the correct path to the listing + // (this adjusts the listing urls to be rooted against + // whatever root the page is actually running against) + const relative = offsetRelativeUrl(pathWithoutLeadingSlash); + const baseUrl = window.location; + const resolvedPath = new URL(relative, baseUrl); + listingHrefs.push(resolvedPath.pathname); + break; + } + } + } + + // Look up the tree for a nearby linting and use that if we find one + const nearestListing = findNearestParentListing( + offsetAbsoluteUrl(window.location.pathname), + listingHrefs + ); + if (nearestListing) { + activateCategories(nearestListing); + } else { + // See if the referrer is a listing page for this item + const referredRelativePath = offsetAbsoluteUrl(document.referrer); + const referrerListing = listingHrefs.find((listingHref) => { + const isListingReferrer = + listingHref === referredRelativePath || + listingHref === referredRelativePath + "index.html"; + return isListingReferrer; + }); + + if (referrerListing) { + // Try to use the referrer if possible + activateCategories(referrerListing); + } else if (listingHrefs.length > 0) { + // Otherwise, just fall back to the first listing + activateCategories(listingHrefs[0]); + } + } + }); + } + } + if (hasTitleCategories()) { + findAndActivateCategories(); + } + + const findNearestParentListing = (href, listingHrefs) => { + if (!href || !listingHrefs) { + return undefined; + } + // Look up the tree for a nearby linting and use that if we find one + const relativeParts = href.substring(1).split("/"); + while (relativeParts.length > 0) { + const path = relativeParts.join("/"); + for (const listingHref of listingHrefs) { + if (listingHref.startsWith(path)) { + return listingHref; + } + } + relativeParts.pop(); + } + + return undefined; + }; + + const manageSidebarVisiblity = (el, placeholderDescriptor) => { + let isVisible = true; + let elRect; + + return (hiddenRegions) => { + if (el === null) { + return; + } + + // Find the last element of the TOC + const lastChildEl = el.lastElementChild; + + if (lastChildEl) { + // Converts the sidebar to a menu + const convertToMenu = () => { + for (const child of el.children) { + child.style.opacity = 0; + child.style.overflow = "hidden"; + } + + nexttick(() => { + const toggleContainer = window.document.createElement("div"); + 
toggleContainer.style.width = "100%"; + toggleContainer.classList.add("zindex-over-content"); + toggleContainer.classList.add("quarto-sidebar-toggle"); + toggleContainer.classList.add("headroom-target"); // Marks this to be managed by headeroom + toggleContainer.id = placeholderDescriptor.id; + toggleContainer.style.position = "fixed"; + + const toggleIcon = window.document.createElement("i"); + toggleIcon.classList.add("quarto-sidebar-toggle-icon"); + toggleIcon.classList.add("bi"); + toggleIcon.classList.add("bi-caret-down-fill"); + + const toggleTitle = window.document.createElement("div"); + const titleEl = window.document.body.querySelector( + placeholderDescriptor.titleSelector + ); + if (titleEl) { + toggleTitle.append( + titleEl.textContent || titleEl.innerText, + toggleIcon + ); + } + toggleTitle.classList.add("zindex-over-content"); + toggleTitle.classList.add("quarto-sidebar-toggle-title"); + toggleContainer.append(toggleTitle); + + const toggleContents = window.document.createElement("div"); + toggleContents.classList = el.classList; + toggleContents.classList.add("zindex-over-content"); + toggleContents.classList.add("quarto-sidebar-toggle-contents"); + for (const child of el.children) { + if (child.id === "toc-title") { + continue; + } + + const clone = child.cloneNode(true); + clone.style.opacity = 1; + clone.style.display = null; + toggleContents.append(clone); + } + toggleContents.style.height = "0px"; + const positionToggle = () => { + // position the element (top left of parent, same width as parent) + if (!elRect) { + elRect = el.getBoundingClientRect(); + } + toggleContainer.style.left = `${elRect.left}px`; + toggleContainer.style.top = `${elRect.top}px`; + toggleContainer.style.width = `${elRect.width}px`; + }; + positionToggle(); + + toggleContainer.append(toggleContents); + el.parentElement.prepend(toggleContainer); + + // Process clicks + let tocShowing = false; + // Allow the caller to control whether this is dismissed + // when it is clicked (e.g. sidebar navigation supports + // opening and closing the nav tree, so don't dismiss on click) + const clickEl = placeholderDescriptor.dismissOnClick + ? 
toggleContainer + : toggleTitle; + + const closeToggle = () => { + if (tocShowing) { + toggleContainer.classList.remove("expanded"); + toggleContents.style.height = "0px"; + tocShowing = false; + } + }; + + // Get rid of any expanded toggle if the user scrolls + window.document.addEventListener( + "scroll", + throttle(() => { + closeToggle(); + }, 50) + ); + + // Handle positioning of the toggle + window.addEventListener( + "resize", + throttle(() => { + elRect = undefined; + positionToggle(); + }, 50) + ); + + window.addEventListener("quarto-hrChanged", () => { + elRect = undefined; + }); + + // Process the click + clickEl.onclick = () => { + if (!tocShowing) { + toggleContainer.classList.add("expanded"); + toggleContents.style.height = null; + tocShowing = true; + } else { + closeToggle(); + } + }; + }); + }; + + // Converts a sidebar from a menu back to a sidebar + const convertToSidebar = () => { + for (const child of el.children) { + child.style.opacity = 1; + child.style.overflow = null; + } + + const placeholderEl = window.document.getElementById( + placeholderDescriptor.id + ); + if (placeholderEl) { + placeholderEl.remove(); + } + + el.classList.remove("rollup"); + }; + + if (isReaderMode()) { + convertToMenu(); + isVisible = false; + } else { + // Find the top and bottom o the element that is being managed + const elTop = el.offsetTop; + const elBottom = + elTop + lastChildEl.offsetTop + lastChildEl.offsetHeight; + + if (!isVisible) { + // If the element is current not visible reveal if there are + // no conflicts with overlay regions + if (!inHiddenRegion(elTop, elBottom, hiddenRegions)) { + convertToSidebar(); + isVisible = true; + } + } else { + // If the element is visible, hide it if it conflicts with overlay regions + // and insert a placeholder toggle (or if we're in reader mode) + if (inHiddenRegion(elTop, elBottom, hiddenRegions)) { + convertToMenu(); + isVisible = false; + } + } + } + } + }; + }; + + const tabEls = document.querySelectorAll('a[data-bs-toggle="tab"]'); + for (const tabEl of tabEls) { + const id = tabEl.getAttribute("data-bs-target"); + if (id) { + const columnEl = document.querySelector( + `${id} .column-margin, .tabset-margin-content` + ); + if (columnEl) + tabEl.addEventListener("shown.bs.tab", function (event) { + const el = event.srcElement; + if (el) { + const visibleCls = `${el.id}-margin-content`; + // walk up until we find a parent tabset + let panelTabsetEl = el.parentElement; + while (panelTabsetEl) { + if (panelTabsetEl.classList.contains("panel-tabset")) { + break; + } + panelTabsetEl = panelTabsetEl.parentElement; + } + + if (panelTabsetEl) { + const prevSib = panelTabsetEl.previousElementSibling; + if ( + prevSib && + prevSib.classList.contains("tabset-margin-container") + ) { + const childNodes = prevSib.querySelectorAll( + ".tabset-margin-content" + ); + for (const childEl of childNodes) { + if (childEl.classList.contains(visibleCls)) { + childEl.classList.remove("collapse"); + } else { + childEl.classList.add("collapse"); + } + } + } + } + } + + layoutMarginEls(); + }); + } + } + + // Manage the visibility of the toc and the sidebar + const marginScrollVisibility = manageSidebarVisiblity(marginSidebarEl, { + id: "quarto-toc-toggle", + titleSelector: "#toc-title", + dismissOnClick: true, + }); + const sidebarScrollVisiblity = manageSidebarVisiblity(sidebarEl, { + id: "quarto-sidebarnav-toggle", + titleSelector: ".title", + dismissOnClick: false, + }); + let tocLeftScrollVisibility; + if (leftTocEl) { + tocLeftScrollVisibility = 
manageSidebarVisiblity(leftTocEl, { + id: "quarto-lefttoc-toggle", + titleSelector: "#toc-title", + dismissOnClick: true, + }); + } + + // Find the first element that uses formatting in special columns + const conflictingEls = window.document.body.querySelectorAll( + '[class^="column-"], [class*=" column-"], aside, [class*="margin-caption"], [class*=" margin-caption"], [class*="margin-ref"], [class*=" margin-ref"]' + ); + + // Filter all the possibly conflicting elements into ones + // the do conflict on the left or ride side + const arrConflictingEls = Array.from(conflictingEls); + const leftSideConflictEls = arrConflictingEls.filter((el) => { + if (el.tagName === "ASIDE") { + return false; + } + return Array.from(el.classList).find((className) => { + return ( + className !== "column-body" && + className.startsWith("column-") && + !className.endsWith("right") && + !className.endsWith("container") && + className !== "column-margin" + ); + }); + }); + const rightSideConflictEls = arrConflictingEls.filter((el) => { + if (el.tagName === "ASIDE") { + return true; + } + + const hasMarginCaption = Array.from(el.classList).find((className) => { + return className == "margin-caption"; + }); + if (hasMarginCaption) { + return true; + } + + return Array.from(el.classList).find((className) => { + return ( + className !== "column-body" && + !className.endsWith("container") && + className.startsWith("column-") && + !className.endsWith("left") + ); + }); + }); + + const kOverlapPaddingSize = 10; + function toRegions(els) { + return els.map((el) => { + const boundRect = el.getBoundingClientRect(); + const top = + boundRect.top + + document.documentElement.scrollTop - + kOverlapPaddingSize; + return { + top, + bottom: top + el.scrollHeight + 2 * kOverlapPaddingSize, + }; + }); + } + + let hasObserved = false; + const visibleItemObserver = (els) => { + let visibleElements = [...els]; + const intersectionObserver = new IntersectionObserver( + (entries, _observer) => { + entries.forEach((entry) => { + if (entry.isIntersecting) { + if (visibleElements.indexOf(entry.target) === -1) { + visibleElements.push(entry.target); + } + } else { + visibleElements = visibleElements.filter((visibleEntry) => { + return visibleEntry !== entry; + }); + } + }); + + if (!hasObserved) { + hideOverlappedSidebars(); + } + hasObserved = true; + }, + {} + ); + els.forEach((el) => { + intersectionObserver.observe(el); + }); + + return { + getVisibleEntries: () => { + return visibleElements; + }, + }; + }; + + const rightElementObserver = visibleItemObserver(rightSideConflictEls); + const leftElementObserver = visibleItemObserver(leftSideConflictEls); + + const hideOverlappedSidebars = () => { + marginScrollVisibility(toRegions(rightElementObserver.getVisibleEntries())); + sidebarScrollVisiblity(toRegions(leftElementObserver.getVisibleEntries())); + if (tocLeftScrollVisibility) { + tocLeftScrollVisibility( + toRegions(leftElementObserver.getVisibleEntries()) + ); + } + }; + + window.quartoToggleReader = () => { + // Applies a slow class (or removes it) + // to update the transition speed + const slowTransition = (slow) => { + const manageTransition = (id, slow) => { + const el = document.getElementById(id); + if (el) { + if (slow) { + el.classList.add("slow"); + } else { + el.classList.remove("slow"); + } + } + }; + + manageTransition("TOC", slow); + manageTransition("quarto-sidebar", slow); + }; + const readerMode = !isReaderMode(); + setReaderModeValue(readerMode); + + // If we're entering reader mode, slow the transition + if 
(readerMode) { + slowTransition(readerMode); + } + highlightReaderToggle(readerMode); + hideOverlappedSidebars(); + + // If we're exiting reader mode, restore the non-slow transition + if (!readerMode) { + slowTransition(!readerMode); + } + }; + + const highlightReaderToggle = (readerMode) => { + const els = document.querySelectorAll(".quarto-reader-toggle"); + if (els) { + els.forEach((el) => { + if (readerMode) { + el.classList.add("reader"); + } else { + el.classList.remove("reader"); + } + }); + } + }; + + const setReaderModeValue = (val) => { + if (window.location.protocol !== "file:") { + window.localStorage.setItem("quarto-reader-mode", val); + } else { + localReaderMode = val; + } + }; + + const isReaderMode = () => { + if (window.location.protocol !== "file:") { + return window.localStorage.getItem("quarto-reader-mode") === "true"; + } else { + return localReaderMode; + } + }; + let localReaderMode = null; + + const tocOpenDepthStr = tocEl?.getAttribute("data-toc-expanded"); + const tocOpenDepth = tocOpenDepthStr ? Number(tocOpenDepthStr) : 1; + + // Walk the TOC and collapse/expand nodes + // Nodes are expanded if: + // - they are top level + // - they have children that are 'active' links + // - they are directly below an link that is 'active' + const walk = (el, depth) => { + // Tick depth when we enter a UL + if (el.tagName === "UL") { + depth = depth + 1; + } + + // It this is active link + let isActiveNode = false; + if (el.tagName === "A" && el.classList.contains("active")) { + isActiveNode = true; + } + + // See if there is an active child to this element + let hasActiveChild = false; + for (child of el.children) { + hasActiveChild = walk(child, depth) || hasActiveChild; + } + + // Process the collapse state if this is an UL + if (el.tagName === "UL") { + if (tocOpenDepth === -1 && depth > 1) { + el.classList.add("collapse"); + } else if ( + depth <= tocOpenDepth || + hasActiveChild || + prevSiblingIsActiveLink(el) + ) { + el.classList.remove("collapse"); + } else { + el.classList.add("collapse"); + } + + // untick depth when we leave a UL + depth = depth - 1; + } + return hasActiveChild || isActiveNode; + }; + + // walk the TOC and expand / collapse any items that should be shown + + if (tocEl) { + walk(tocEl, 0); + updateActiveLink(); + } + + // Throttle the scroll event and walk peridiocally + window.document.addEventListener( + "scroll", + throttle(() => { + if (tocEl) { + updateActiveLink(); + walk(tocEl, 0); + } + if (!isReaderMode()) { + hideOverlappedSidebars(); + } + }, 5) + ); + window.addEventListener( + "resize", + throttle(() => { + if (!isReaderMode()) { + hideOverlappedSidebars(); + } + }, 10) + ); + hideOverlappedSidebars(); + highlightReaderToggle(isReaderMode()); +}); + +// grouped tabsets +window.addEventListener("pageshow", (_event) => { + function getTabSettings() { + const data = localStorage.getItem("quarto-persistent-tabsets-data"); + if (!data) { + localStorage.setItem("quarto-persistent-tabsets-data", "{}"); + return {}; + } + if (data) { + return JSON.parse(data); + } + } + + function setTabSettings(data) { + localStorage.setItem( + "quarto-persistent-tabsets-data", + JSON.stringify(data) + ); + } + + function setTabState(groupName, groupValue) { + const data = getTabSettings(); + data[groupName] = groupValue; + setTabSettings(data); + } + + function toggleTab(tab, active) { + const tabPanelId = tab.getAttribute("aria-controls"); + const tabPanel = document.getElementById(tabPanelId); + if (active) { + tab.classList.add("active"); + 
tabPanel.classList.add("active"); + } else { + tab.classList.remove("active"); + tabPanel.classList.remove("active"); + } + } + + function toggleAll(selectedGroup, selectorsToSync) { + for (const [thisGroup, tabs] of Object.entries(selectorsToSync)) { + const active = selectedGroup === thisGroup; + for (const tab of tabs) { + toggleTab(tab, active); + } + } + } + + function findSelectorsToSyncByLanguage() { + const result = {}; + const tabs = Array.from( + document.querySelectorAll(`div[data-group] a[id^='tabset-']`) + ); + for (const item of tabs) { + const div = item.parentElement.parentElement.parentElement; + const group = div.getAttribute("data-group"); + if (!result[group]) { + result[group] = {}; + } + const selectorsToSync = result[group]; + const value = item.innerHTML; + if (!selectorsToSync[value]) { + selectorsToSync[value] = []; + } + selectorsToSync[value].push(item); + } + return result; + } + + function setupSelectorSync() { + const selectorsToSync = findSelectorsToSyncByLanguage(); + Object.entries(selectorsToSync).forEach(([group, tabSetsByValue]) => { + Object.entries(tabSetsByValue).forEach(([value, items]) => { + items.forEach((item) => { + item.addEventListener("click", (_event) => { + setTabState(group, value); + toggleAll(value, selectorsToSync[group]); + }); + }); + }); + }); + return selectorsToSync; + } + + const selectorsToSync = setupSelectorSync(); + for (const [group, selectedName] of Object.entries(getTabSettings())) { + const selectors = selectorsToSync[group]; + // it's possible that stale state gives us empty selections, so we explicitly check here. + if (selectors) { + toggleAll(selectedName, selectors); + } + } +}); + +function throttle(func, wait) { + let waiting = false; + return function () { + if (!waiting) { + func.apply(this, arguments); + waiting = true; + setTimeout(function () { + waiting = false; + }, wait); + } + }; +} + +function nexttick(func) { + return setTimeout(func, 0); +} diff --git a/site_libs/quarto-html/tippy.css b/site_libs/quarto-html/tippy.css new file mode 100644 index 000000000..e6ae635cb --- /dev/null +++ b/site_libs/quarto-html/tippy.css @@ -0,0 +1 @@ +.tippy-box[data-animation=fade][data-state=hidden]{opacity:0}[data-tippy-root]{max-width:calc(100vw - 10px)}.tippy-box{position:relative;background-color:#333;color:#fff;border-radius:4px;font-size:14px;line-height:1.4;white-space:normal;outline:0;transition-property:transform,visibility,opacity}.tippy-box[data-placement^=top]>.tippy-arrow{bottom:0}.tippy-box[data-placement^=top]>.tippy-arrow:before{bottom:-7px;left:0;border-width:8px 8px 0;border-top-color:initial;transform-origin:center top}.tippy-box[data-placement^=bottom]>.tippy-arrow{top:0}.tippy-box[data-placement^=bottom]>.tippy-arrow:before{top:-7px;left:0;border-width:0 8px 8px;border-bottom-color:initial;transform-origin:center bottom}.tippy-box[data-placement^=left]>.tippy-arrow{right:0}.tippy-box[data-placement^=left]>.tippy-arrow:before{border-width:8px 0 8px 8px;border-left-color:initial;right:-7px;transform-origin:center left}.tippy-box[data-placement^=right]>.tippy-arrow{left:0}.tippy-box[data-placement^=right]>.tippy-arrow:before{left:-7px;border-width:8px 8px 8px 0;border-right-color:initial;transform-origin:center right}.tippy-box[data-inertia][data-state=visible]{transition-timing-function:cubic-bezier(.54,1.5,.38,1.11)}.tippy-arrow{width:16px;height:16px;color:#333}.tippy-arrow:before{content:"";position:absolute;border-color:transparent;border-style:solid}.tippy-content{position:relative;padding:5px 
9px;z-index:1} \ No newline at end of file diff --git a/site_libs/quarto-html/tippy.umd.min.js b/site_libs/quarto-html/tippy.umd.min.js new file mode 100644 index 000000000..ca292be32 --- /dev/null +++ b/site_libs/quarto-html/tippy.umd.min.js @@ -0,0 +1,2 @@ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t(require("@popperjs/core")):"function"==typeof define&&define.amd?define(["@popperjs/core"],t):(e=e||self).tippy=t(e.Popper)}(this,(function(e){"use strict";var t={passive:!0,capture:!0},n=function(){return document.body};function r(e,t,n){if(Array.isArray(e)){var r=e[t];return null==r?Array.isArray(n)?n[t]:n:r}return e}function o(e,t){var n={}.toString.call(e);return 0===n.indexOf("[object")&&n.indexOf(t+"]")>-1}function i(e,t){return"function"==typeof e?e.apply(void 0,t):e}function a(e,t){return 0===t?e:function(r){clearTimeout(n),n=setTimeout((function(){e(r)}),t)};var n}function s(e,t){var n=Object.assign({},e);return t.forEach((function(e){delete n[e]})),n}function u(e){return[].concat(e)}function c(e,t){-1===e.indexOf(t)&&e.push(t)}function p(e){return e.split("-")[0]}function f(e){return[].slice.call(e)}function l(e){return Object.keys(e).reduce((function(t,n){return void 0!==e[n]&&(t[n]=e[n]),t}),{})}function d(){return document.createElement("div")}function v(e){return["Element","Fragment"].some((function(t){return o(e,t)}))}function m(e){return o(e,"MouseEvent")}function g(e){return!(!e||!e._tippy||e._tippy.reference!==e)}function h(e){return v(e)?[e]:function(e){return o(e,"NodeList")}(e)?f(e):Array.isArray(e)?e:f(document.querySelectorAll(e))}function b(e,t){e.forEach((function(e){e&&(e.style.transitionDuration=t+"ms")}))}function y(e,t){e.forEach((function(e){e&&e.setAttribute("data-state",t)}))}function w(e){var t,n=u(e)[0];return null!=n&&null!=(t=n.ownerDocument)&&t.body?n.ownerDocument:document}function E(e,t,n){var r=t+"EventListener";["transitionend","webkitTransitionEnd"].forEach((function(t){e[r](t,n)}))}function O(e,t){for(var n=t;n;){var r;if(e.contains(n))return!0;n=null==n.getRootNode||null==(r=n.getRootNode())?void 0:r.host}return!1}var x={isTouch:!1},C=0;function T(){x.isTouch||(x.isTouch=!0,window.performance&&document.addEventListener("mousemove",A))}function A(){var e=performance.now();e-C<20&&(x.isTouch=!1,document.removeEventListener("mousemove",A)),C=e}function L(){var e=document.activeElement;if(g(e)){var t=e._tippy;e.blur&&!t.state.isVisible&&e.blur()}}var D=!!("undefined"!=typeof window&&"undefined"!=typeof document)&&!!window.msCrypto,R=Object.assign({appendTo:n,aria:{content:"auto",expanded:"auto"},delay:0,duration:[300,250],getReferenceClientRect:null,hideOnClick:!0,ignoreAttributes:!1,interactive:!1,interactiveBorder:2,interactiveDebounce:0,moveTransition:"",offset:[0,10],onAfterUpdate:function(){},onBeforeUpdate:function(){},onCreate:function(){},onDestroy:function(){},onHidden:function(){},onHide:function(){},onMount:function(){},onShow:function(){},onShown:function(){},onTrigger:function(){},onUntrigger:function(){},onClickOutside:function(){},placement:"top",plugins:[],popperOptions:{},render:null,showOnCreate:!1,touch:!0,trigger:"mouseenter focus",triggerTarget:null},{animateFill:!1,followCursor:!1,inlinePositioning:!1,sticky:!1},{allowHTML:!1,animation:"fade",arrow:!0,content:"",inertia:!1,maxWidth:350,role:"tooltip",theme:"",zIndex:9999}),k=Object.keys(R);function P(e){var t=(e.plugins||[]).reduce((function(t,n){var r,o=n.name,i=n.defaultValue;o&&(t[o]=void 0!==e[o]?e[o]:null!=(r=R[o])?r:i);return 
t}),{});return Object.assign({},e,t)}function j(e,t){var n=Object.assign({},t,{content:i(t.content,[e])},t.ignoreAttributes?{}:function(e,t){return(t?Object.keys(P(Object.assign({},R,{plugins:t}))):k).reduce((function(t,n){var r=(e.getAttribute("data-tippy-"+n)||"").trim();if(!r)return t;if("content"===n)t[n]=r;else try{t[n]=JSON.parse(r)}catch(e){t[n]=r}return t}),{})}(e,t.plugins));return n.aria=Object.assign({},R.aria,n.aria),n.aria={expanded:"auto"===n.aria.expanded?t.interactive:n.aria.expanded,content:"auto"===n.aria.content?t.interactive?null:"describedby":n.aria.content},n}function M(e,t){e.innerHTML=t}function V(e){var t=d();return!0===e?t.className="tippy-arrow":(t.className="tippy-svg-arrow",v(e)?t.appendChild(e):M(t,e)),t}function I(e,t){v(t.content)?(M(e,""),e.appendChild(t.content)):"function"!=typeof t.content&&(t.allowHTML?M(e,t.content):e.textContent=t.content)}function S(e){var t=e.firstElementChild,n=f(t.children);return{box:t,content:n.find((function(e){return e.classList.contains("tippy-content")})),arrow:n.find((function(e){return e.classList.contains("tippy-arrow")||e.classList.contains("tippy-svg-arrow")})),backdrop:n.find((function(e){return e.classList.contains("tippy-backdrop")}))}}function N(e){var t=d(),n=d();n.className="tippy-box",n.setAttribute("data-state","hidden"),n.setAttribute("tabindex","-1");var r=d();function o(n,r){var o=S(t),i=o.box,a=o.content,s=o.arrow;r.theme?i.setAttribute("data-theme",r.theme):i.removeAttribute("data-theme"),"string"==typeof r.animation?i.setAttribute("data-animation",r.animation):i.removeAttribute("data-animation"),r.inertia?i.setAttribute("data-inertia",""):i.removeAttribute("data-inertia"),i.style.maxWidth="number"==typeof r.maxWidth?r.maxWidth+"px":r.maxWidth,r.role?i.setAttribute("role",r.role):i.removeAttribute("role"),n.content===r.content&&n.allowHTML===r.allowHTML||I(a,e.props),r.arrow?s?n.arrow!==r.arrow&&(i.removeChild(s),i.appendChild(V(r.arrow))):i.appendChild(V(r.arrow)):s&&i.removeChild(s)}return r.className="tippy-content",r.setAttribute("data-state","hidden"),I(r,e.props),t.appendChild(n),n.appendChild(r),o(e.props,e.props),{popper:t,onUpdate:o}}N.$$tippy=!0;var B=1,H=[],U=[];function _(o,s){var v,g,h,C,T,A,L,k,M=j(o,Object.assign({},R,P(l(s)))),V=!1,I=!1,N=!1,_=!1,F=[],W=a(we,M.interactiveDebounce),X=B++,Y=(k=M.plugins).filter((function(e,t){return k.indexOf(e)===t})),$={id:X,reference:o,popper:d(),popperInstance:null,props:M,state:{isEnabled:!0,isVisible:!1,isDestroyed:!1,isMounted:!1,isShown:!1},plugins:Y,clearDelayTimeouts:function(){clearTimeout(v),clearTimeout(g),cancelAnimationFrame(h)},setProps:function(e){if($.state.isDestroyed)return;ae("onBeforeUpdate",[$,e]),be();var t=$.props,n=j(o,Object.assign({},t,l(e),{ignoreAttributes:!0}));$.props=n,he(),t.interactiveDebounce!==n.interactiveDebounce&&(ce(),W=a(we,n.interactiveDebounce));t.triggerTarget&&!n.triggerTarget?u(t.triggerTarget).forEach((function(e){e.removeAttribute("aria-expanded")})):n.triggerTarget&&o.removeAttribute("aria-expanded");ue(),ie(),J&&J(t,n);$.popperInstance&&(Ce(),Ae().forEach((function(e){requestAnimationFrame(e._tippy.popperInstance.forceUpdate)})));ae("onAfterUpdate",[$,e])},setContent:function(e){$.setProps({content:e})},show:function(){var 
e=$.state.isVisible,t=$.state.isDestroyed,o=!$.state.isEnabled,a=x.isTouch&&!$.props.touch,s=r($.props.duration,0,R.duration);if(e||t||o||a)return;if(te().hasAttribute("disabled"))return;if(ae("onShow",[$],!1),!1===$.props.onShow($))return;$.state.isVisible=!0,ee()&&(z.style.visibility="visible");ie(),de(),$.state.isMounted||(z.style.transition="none");if(ee()){var u=re(),p=u.box,f=u.content;b([p,f],0)}A=function(){var e;if($.state.isVisible&&!_){if(_=!0,z.offsetHeight,z.style.transition=$.props.moveTransition,ee()&&$.props.animation){var t=re(),n=t.box,r=t.content;b([n,r],s),y([n,r],"visible")}se(),ue(),c(U,$),null==(e=$.popperInstance)||e.forceUpdate(),ae("onMount",[$]),$.props.animation&&ee()&&function(e,t){me(e,t)}(s,(function(){$.state.isShown=!0,ae("onShown",[$])}))}},function(){var e,t=$.props.appendTo,r=te();e=$.props.interactive&&t===n||"parent"===t?r.parentNode:i(t,[r]);e.contains(z)||e.appendChild(z);$.state.isMounted=!0,Ce()}()},hide:function(){var e=!$.state.isVisible,t=$.state.isDestroyed,n=!$.state.isEnabled,o=r($.props.duration,1,R.duration);if(e||t||n)return;if(ae("onHide",[$],!1),!1===$.props.onHide($))return;$.state.isVisible=!1,$.state.isShown=!1,_=!1,V=!1,ee()&&(z.style.visibility="hidden");if(ce(),ve(),ie(!0),ee()){var i=re(),a=i.box,s=i.content;$.props.animation&&(b([a,s],o),y([a,s],"hidden"))}se(),ue(),$.props.animation?ee()&&function(e,t){me(e,(function(){!$.state.isVisible&&z.parentNode&&z.parentNode.contains(z)&&t()}))}(o,$.unmount):$.unmount()},hideWithInteractivity:function(e){ne().addEventListener("mousemove",W),c(H,W),W(e)},enable:function(){$.state.isEnabled=!0},disable:function(){$.hide(),$.state.isEnabled=!1},unmount:function(){$.state.isVisible&&$.hide();if(!$.state.isMounted)return;Te(),Ae().forEach((function(e){e._tippy.unmount()})),z.parentNode&&z.parentNode.removeChild(z);U=U.filter((function(e){return e!==$})),$.state.isMounted=!1,ae("onHidden",[$])},destroy:function(){if($.state.isDestroyed)return;$.clearDelayTimeouts(),$.unmount(),be(),delete o._tippy,$.state.isDestroyed=!0,ae("onDestroy",[$])}};if(!M.render)return $;var q=M.render($),z=q.popper,J=q.onUpdate;z.setAttribute("data-tippy-root",""),z.id="tippy-"+$.id,$.popper=z,o._tippy=$,z._tippy=$;var G=Y.map((function(e){return e.fn($)})),K=o.hasAttribute("aria-expanded");return he(),ue(),ie(),ae("onCreate",[$]),M.showOnCreate&&Le(),z.addEventListener("mouseenter",(function(){$.props.interactive&&$.state.isVisible&&$.clearDelayTimeouts()})),z.addEventListener("mouseleave",(function(){$.props.interactive&&$.props.trigger.indexOf("mouseenter")>=0&&ne().addEventListener("mousemove",W)})),$;function Q(){var e=$.props.touch;return Array.isArray(e)?e:[e,0]}function Z(){return"hold"===Q()[0]}function ee(){var e;return!(null==(e=$.props.render)||!e.$$tippy)}function te(){return L||o}function ne(){var e=te().parentNode;return e?w(e):document}function re(){return S(z)}function oe(e){return $.state.isMounted&&!$.state.isVisible||x.isTouch||C&&"focus"===C.type?0:r($.props.delay,e?0:1,R.delay)}function ie(e){void 0===e&&(e=!1),z.style.pointerEvents=$.props.interactive&&!e?"":"none",z.style.zIndex=""+$.props.zIndex}function ae(e,t,n){var r;(void 0===n&&(n=!0),G.forEach((function(n){n[e]&&n[e].apply(n,t)})),n)&&(r=$.props)[e].apply(r,t)}function se(){var e=$.props.aria;if(e.content){var t="aria-"+e.content,n=z.id;u($.props.triggerTarget||o).forEach((function(e){var r=e.getAttribute(t);if($.state.isVisible)e.setAttribute(t,r?r+" "+n:n);else{var 
o=r&&r.replace(n,"").trim();o?e.setAttribute(t,o):e.removeAttribute(t)}}))}}function ue(){!K&&$.props.aria.expanded&&u($.props.triggerTarget||o).forEach((function(e){$.props.interactive?e.setAttribute("aria-expanded",$.state.isVisible&&e===te()?"true":"false"):e.removeAttribute("aria-expanded")}))}function ce(){ne().removeEventListener("mousemove",W),H=H.filter((function(e){return e!==W}))}function pe(e){if(!x.isTouch||!N&&"mousedown"!==e.type){var t=e.composedPath&&e.composedPath()[0]||e.target;if(!$.props.interactive||!O(z,t)){if(u($.props.triggerTarget||o).some((function(e){return O(e,t)}))){if(x.isTouch)return;if($.state.isVisible&&$.props.trigger.indexOf("click")>=0)return}else ae("onClickOutside",[$,e]);!0===$.props.hideOnClick&&($.clearDelayTimeouts(),$.hide(),I=!0,setTimeout((function(){I=!1})),$.state.isMounted||ve())}}}function fe(){N=!0}function le(){N=!1}function de(){var e=ne();e.addEventListener("mousedown",pe,!0),e.addEventListener("touchend",pe,t),e.addEventListener("touchstart",le,t),e.addEventListener("touchmove",fe,t)}function ve(){var e=ne();e.removeEventListener("mousedown",pe,!0),e.removeEventListener("touchend",pe,t),e.removeEventListener("touchstart",le,t),e.removeEventListener("touchmove",fe,t)}function me(e,t){var n=re().box;function r(e){e.target===n&&(E(n,"remove",r),t())}if(0===e)return t();E(n,"remove",T),E(n,"add",r),T=r}function ge(e,t,n){void 0===n&&(n=!1),u($.props.triggerTarget||o).forEach((function(r){r.addEventListener(e,t,n),F.push({node:r,eventType:e,handler:t,options:n})}))}function he(){var e;Z()&&(ge("touchstart",ye,{passive:!0}),ge("touchend",Ee,{passive:!0})),(e=$.props.trigger,e.split(/\s+/).filter(Boolean)).forEach((function(e){if("manual"!==e)switch(ge(e,ye),e){case"mouseenter":ge("mouseleave",Ee);break;case"focus":ge(D?"focusout":"blur",Oe);break;case"focusin":ge("focusout",Oe)}}))}function be(){F.forEach((function(e){var t=e.node,n=e.eventType,r=e.handler,o=e.options;t.removeEventListener(n,r,o)})),F=[]}function ye(e){var t,n=!1;if($.state.isEnabled&&!xe(e)&&!I){var r="focus"===(null==(t=C)?void 0:t.type);C=e,L=e.currentTarget,ue(),!$.state.isVisible&&m(e)&&H.forEach((function(t){return t(e)})),"click"===e.type&&($.props.trigger.indexOf("mouseenter")<0||V)&&!1!==$.props.hideOnClick&&$.state.isVisible?n=!0:Le(e),"click"===e.type&&(V=!n),n&&!r&&De(e)}}function we(e){var t=e.target,n=te().contains(t)||z.contains(t);"mousemove"===e.type&&n||function(e,t){var n=t.clientX,r=t.clientY;return e.every((function(e){var t=e.popperRect,o=e.popperState,i=e.props.interactiveBorder,a=p(o.placement),s=o.modifiersData.offset;if(!s)return!0;var u="bottom"===a?s.top.y:0,c="top"===a?s.bottom.y:0,f="right"===a?s.left.x:0,l="left"===a?s.right.x:0,d=t.top-r+u>i,v=r-t.bottom-c>i,m=t.left-n+f>i,g=n-t.right-l>i;return d||v||m||g}))}(Ae().concat(z).map((function(e){var t,n=null==(t=e._tippy.popperInstance)?void 0:t.state;return n?{popperRect:e.getBoundingClientRect(),popperState:n,props:M}:null})).filter(Boolean),e)&&(ce(),De(e))}function Ee(e){xe(e)||$.props.trigger.indexOf("click")>=0&&V||($.props.interactive?$.hideWithInteractivity(e):De(e))}function Oe(e){$.props.trigger.indexOf("focusin")<0&&e.target!==te()||$.props.interactive&&e.relatedTarget&&z.contains(e.relatedTarget)||De(e)}function xe(e){return!!x.isTouch&&Z()!==e.type.indexOf("touch")>=0}function Ce(){Te();var 
t=$.props,n=t.popperOptions,r=t.placement,i=t.offset,a=t.getReferenceClientRect,s=t.moveTransition,u=ee()?S(z).arrow:null,c=a?{getBoundingClientRect:a,contextElement:a.contextElement||te()}:o,p=[{name:"offset",options:{offset:i}},{name:"preventOverflow",options:{padding:{top:2,bottom:2,left:5,right:5}}},{name:"flip",options:{padding:5}},{name:"computeStyles",options:{adaptive:!s}},{name:"$$tippy",enabled:!0,phase:"beforeWrite",requires:["computeStyles"],fn:function(e){var t=e.state;if(ee()){var n=re().box;["placement","reference-hidden","escaped"].forEach((function(e){"placement"===e?n.setAttribute("data-placement",t.placement):t.attributes.popper["data-popper-"+e]?n.setAttribute("data-"+e,""):n.removeAttribute("data-"+e)})),t.attributes.popper={}}}}];ee()&&u&&p.push({name:"arrow",options:{element:u,padding:3}}),p.push.apply(p,(null==n?void 0:n.modifiers)||[]),$.popperInstance=e.createPopper(c,z,Object.assign({},n,{placement:r,onFirstUpdate:A,modifiers:p}))}function Te(){$.popperInstance&&($.popperInstance.destroy(),$.popperInstance=null)}function Ae(){return f(z.querySelectorAll("[data-tippy-root]"))}function Le(e){$.clearDelayTimeouts(),e&&ae("onTrigger",[$,e]),de();var t=oe(!0),n=Q(),r=n[0],o=n[1];x.isTouch&&"hold"===r&&o&&(t=o),t?v=setTimeout((function(){$.show()}),t):$.show()}function De(e){if($.clearDelayTimeouts(),ae("onUntrigger",[$,e]),$.state.isVisible){if(!($.props.trigger.indexOf("mouseenter")>=0&&$.props.trigger.indexOf("click")>=0&&["mouseleave","mousemove"].indexOf(e.type)>=0&&V)){var t=oe(!1);t?g=setTimeout((function(){$.state.isVisible&&$.hide()}),t):h=requestAnimationFrame((function(){$.hide()}))}}else ve()}}function F(e,n){void 0===n&&(n={});var r=R.plugins.concat(n.plugins||[]);document.addEventListener("touchstart",T,t),window.addEventListener("blur",L);var o=Object.assign({},n,{plugins:r}),i=h(e).reduce((function(e,t){var n=t&&_(t,o);return n&&e.push(n),e}),[]);return v(e)?i[0]:i}F.defaultProps=R,F.setDefaultProps=function(e){Object.keys(e).forEach((function(t){R[t]=e[t]}))},F.currentInput=x;var W=Object.assign({},e.applyStyles,{effect:function(e){var t=e.state,n={popper:{position:t.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};Object.assign(t.elements.popper.style,n.popper),t.styles=n,t.elements.arrow&&Object.assign(t.elements.arrow.style,n.arrow)}}),X={mouseover:"mouseenter",focusin:"focus",click:"click"};var Y={name:"animateFill",defaultValue:!1,fn:function(e){var t;if(null==(t=e.props.render)||!t.$$tippy)return{};var n=S(e.popper),r=n.box,o=n.content,i=e.props.animateFill?function(){var e=d();return e.className="tippy-backdrop",y([e],"hidden"),e}():null;return{onCreate:function(){i&&(r.insertBefore(i,r.firstElementChild),r.setAttribute("data-animatefill",""),r.style.overflow="hidden",e.setProps({arrow:!1,animation:"shift-away"}))},onMount:function(){if(i){var e=r.style.transitionDuration,t=Number(e.replace("ms",""));o.style.transitionDelay=Math.round(t/10)+"ms",i.style.transitionDuration=e,y([i],"visible")}},onShow:function(){i&&(i.style.transitionDuration="0ms")},onHide:function(){i&&y([i],"hidden")}}}};var $={clientX:0,clientY:0},q=[];function z(e){var t=e.clientX,n=e.clientY;$={clientX:t,clientY:n}}var J={name:"followCursor",defaultValue:!1,fn:function(e){var t=e.reference,n=w(e.props.triggerTarget||t),r=!1,o=!1,i=!0,a=e.props;function s(){return"initial"===e.props.followCursor&&e.state.isVisible}function u(){n.addEventListener("mousemove",f)}function c(){n.removeEventListener("mousemove",f)}function 
p(){r=!0,e.setProps({getReferenceClientRect:null}),r=!1}function f(n){var r=!n.target||t.contains(n.target),o=e.props.followCursor,i=n.clientX,a=n.clientY,s=t.getBoundingClientRect(),u=i-s.left,c=a-s.top;!r&&e.props.interactive||e.setProps({getReferenceClientRect:function(){var e=t.getBoundingClientRect(),n=i,r=a;"initial"===o&&(n=e.left+u,r=e.top+c);var s="horizontal"===o?e.top:r,p="vertical"===o?e.right:n,f="horizontal"===o?e.bottom:r,l="vertical"===o?e.left:n;return{width:p-l,height:f-s,top:s,right:p,bottom:f,left:l}}})}function l(){e.props.followCursor&&(q.push({instance:e,doc:n}),function(e){e.addEventListener("mousemove",z)}(n))}function d(){0===(q=q.filter((function(t){return t.instance!==e}))).filter((function(e){return e.doc===n})).length&&function(e){e.removeEventListener("mousemove",z)}(n)}return{onCreate:l,onDestroy:d,onBeforeUpdate:function(){a=e.props},onAfterUpdate:function(t,n){var i=n.followCursor;r||void 0!==i&&a.followCursor!==i&&(d(),i?(l(),!e.state.isMounted||o||s()||u()):(c(),p()))},onMount:function(){e.props.followCursor&&!o&&(i&&(f($),i=!1),s()||u())},onTrigger:function(e,t){m(t)&&($={clientX:t.clientX,clientY:t.clientY}),o="focus"===t.type},onHidden:function(){e.props.followCursor&&(p(),c(),i=!0)}}}};var G={name:"inlinePositioning",defaultValue:!1,fn:function(e){var t,n=e.reference;var r=-1,o=!1,i=[],a={name:"tippyInlinePositioning",enabled:!0,phase:"afterWrite",fn:function(o){var a=o.state;e.props.inlinePositioning&&(-1!==i.indexOf(a.placement)&&(i=[]),t!==a.placement&&-1===i.indexOf(a.placement)&&(i.push(a.placement),e.setProps({getReferenceClientRect:function(){return function(e){return function(e,t,n,r){if(n.length<2||null===e)return t;if(2===n.length&&r>=0&&n[0].left>n[1].right)return n[r]||t;switch(e){case"top":case"bottom":var o=n[0],i=n[n.length-1],a="top"===e,s=o.top,u=i.bottom,c=a?o.left:i.left,p=a?o.right:i.right;return{top:s,bottom:u,left:c,right:p,width:p-c,height:u-s};case"left":case"right":var f=Math.min.apply(Math,n.map((function(e){return e.left}))),l=Math.max.apply(Math,n.map((function(e){return e.right}))),d=n.filter((function(t){return"left"===e?t.left===f:t.right===l})),v=d[0].top,m=d[d.length-1].bottom;return{top:v,bottom:m,left:f,right:l,width:l-f,height:m-v};default:return t}}(p(e),n.getBoundingClientRect(),f(n.getClientRects()),r)}(a.placement)}})),t=a.placement)}};function s(){var t;o||(t=function(e,t){var n;return{popperOptions:Object.assign({},e.popperOptions,{modifiers:[].concat(((null==(n=e.popperOptions)?void 0:n.modifiers)||[]).filter((function(e){return e.name!==t.name})),[t])})}}(e.props,a),o=!0,e.setProps(t),o=!1)}return{onCreate:s,onAfterUpdate:s,onTrigger:function(t,n){if(m(n)){var o=f(e.reference.getClientRects()),i=o.find((function(e){return e.left-2<=n.clientX&&e.right+2>=n.clientX&&e.top-2<=n.clientY&&e.bottom+2>=n.clientY})),a=o.indexOf(i);r=a>-1?a:r}},onHidden:function(){r=-1}}}};var K={name:"sticky",defaultValue:!1,fn:function(e){var t=e.reference,n=e.popper;function r(t){return!0===e.props.sticky||e.props.sticky===t}var o=null,i=null;function a(){var s=r("reference")?(e.popperInstance?e.popperInstance.state.elements.reference:t).getBoundingClientRect():null,u=r("popper")?n.getBoundingClientRect():null;(s&&Q(o,s)||u&&Q(i,u))&&e.popperInstance&&e.popperInstance.update(),o=s,i=u,e.state.isMounted&&requestAnimationFrame(a)}return{onMount:function(){e.props.sticky&&a()}}}};function Q(e,t){return!e||!t||(e.top!==t.top||e.right!==t.right||e.bottom!==t.bottom||e.left!==t.left)}return 
F.setDefaultProps({plugins:[Y,J,G,K],render:N}),F.createSingleton=function(e,t){var n;void 0===t&&(t={});var r,o=e,i=[],a=[],c=t.overrides,p=[],f=!1;function l(){a=o.map((function(e){return u(e.props.triggerTarget||e.reference)})).reduce((function(e,t){return e.concat(t)}),[])}function v(){i=o.map((function(e){return e.reference}))}function m(e){o.forEach((function(t){e?t.enable():t.disable()}))}function g(e){return o.map((function(t){var n=t.setProps;return t.setProps=function(o){n(o),t.reference===r&&e.setProps(o)},function(){t.setProps=n}}))}function h(e,t){var n=a.indexOf(t);if(t!==r){r=t;var s=(c||[]).concat("content").reduce((function(e,t){return e[t]=o[n].props[t],e}),{});e.setProps(Object.assign({},s,{getReferenceClientRect:"function"==typeof s.getReferenceClientRect?s.getReferenceClientRect:function(){var e;return null==(e=i[n])?void 0:e.getBoundingClientRect()}}))}}m(!1),v(),l();var b={fn:function(){return{onDestroy:function(){m(!0)},onHidden:function(){r=null},onClickOutside:function(e){e.props.showOnCreate&&!f&&(f=!0,r=null)},onShow:function(e){e.props.showOnCreate&&!f&&(f=!0,h(e,i[0]))},onTrigger:function(e,t){h(e,t.currentTarget)}}}},y=F(d(),Object.assign({},s(t,["overrides"]),{plugins:[b].concat(t.plugins||[]),triggerTarget:a,popperOptions:Object.assign({},t.popperOptions,{modifiers:[].concat((null==(n=t.popperOptions)?void 0:n.modifiers)||[],[W])})})),w=y.show;y.show=function(e){if(w(),!r&&null==e)return h(y,i[0]);if(!r||null!=e){if("number"==typeof e)return i[e]&&h(y,i[e]);if(o.indexOf(e)>=0){var t=e.reference;return h(y,t)}return i.indexOf(e)>=0?h(y,e):void 0}},y.showNext=function(){var e=i[0];if(!r)return y.show(0);var t=i.indexOf(r);y.show(i[t+1]||e)},y.showPrevious=function(){var e=i[i.length-1];if(!r)return y.show(e);var t=i.indexOf(r),n=i[t-1]||e;y.show(n)};var E=y.setProps;return y.setProps=function(e){c=e.overrides||c,E(e)},y.setInstances=function(e){m(!0),p.forEach((function(e){return e()})),o=e,m(!1),v(),l(),p=g(y),y.setProps({triggerTarget:a})},p=g(y),y},F.delegate=function(e,n){var r=[],o=[],i=!1,a=n.target,c=s(n,["target"]),p=Object.assign({},c,{trigger:"manual",touch:!1}),f=Object.assign({touch:R.touch},c,{showOnCreate:!0}),l=F(e,p);function d(e){if(e.target&&!i){var t=e.target.closest(a);if(t){var r=t.getAttribute("data-tippy-trigger")||n.trigger||R.trigger;if(!t._tippy&&!("touchstart"===e.type&&"boolean"==typeof f.touch||"touchstart"!==e.type&&r.indexOf(X[e.type])<0)){var s=F(t,f);s&&(o=o.concat(s))}}}}function v(e,t,n,o){void 0===o&&(o=!1),e.addEventListener(t,n,o),r.push({node:e,eventType:t,handler:n,options:o})}return u(l).forEach((function(e){var n=e.destroy,a=e.enable,s=e.disable;e.destroy=function(e){void 0===e&&(e=!0),e&&o.forEach((function(e){e.destroy()})),o=[],r.forEach((function(e){var t=e.node,n=e.eventType,r=e.handler,o=e.options;t.removeEventListener(n,r,o)})),r=[],n()},e.enable=function(){a(),o.forEach((function(e){return e.enable()})),i=!1},e.disable=function(){s(),o.forEach((function(e){return e.disable()})),i=!0},function(e){var n=e.reference;v(n,"touchstart",d,t),v(n,"mouseover",d),v(n,"focusin",d),v(n,"click",d)}(e)})),l},F.hideAll=function(e){var t=void 0===e?{}:e,n=t.exclude,r=t.duration;U.forEach((function(e){var t=!1;if(n&&(t=g(n)?e.reference===n:e.popper===n.popper),!t){var o=e.props.duration;e.setProps({duration:r}),e.hide(),e.state.isDestroyed||e.setProps({duration:o})}}))},F.roundArrow='',F})); + diff --git a/site_libs/quarto-nav/headroom.min.js b/site_libs/quarto-nav/headroom.min.js new file mode 100644 index 
000000000..b08f1dffb --- /dev/null +++ b/site_libs/quarto-nav/headroom.min.js @@ -0,0 +1,7 @@ +/*! + * headroom.js v0.12.0 - Give your page some headroom. Hide your header until you need it + * Copyright (c) 2020 Nick Williams - http://wicky.nillia.ms/headroom.js + * License: MIT + */ + +!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?module.exports=n():"function"==typeof define&&define.amd?define(n):(t=t||self).Headroom=n()}(this,function(){"use strict";function t(){return"undefined"!=typeof window}function d(t){return function(t){return t&&t.document&&function(t){return 9===t.nodeType}(t.document)}(t)?function(t){var n=t.document,o=n.body,s=n.documentElement;return{scrollHeight:function(){return Math.max(o.scrollHeight,s.scrollHeight,o.offsetHeight,s.offsetHeight,o.clientHeight,s.clientHeight)},height:function(){return t.innerHeight||s.clientHeight||o.clientHeight},scrollY:function(){return void 0!==t.pageYOffset?t.pageYOffset:(s||o.parentNode||o).scrollTop}}}(t):function(t){return{scrollHeight:function(){return Math.max(t.scrollHeight,t.offsetHeight,t.clientHeight)},height:function(){return Math.max(t.offsetHeight,t.clientHeight)},scrollY:function(){return t.scrollTop}}}(t)}function n(t,s,e){var n,o=function(){var n=!1;try{var t={get passive(){n=!0}};window.addEventListener("test",t,t),window.removeEventListener("test",t,t)}catch(t){n=!1}return n}(),i=!1,r=d(t),l=r.scrollY(),a={};function c(){var t=Math.round(r.scrollY()),n=r.height(),o=r.scrollHeight();a.scrollY=t,a.lastScrollY=l,a.direction=ls.tolerance[a.direction],e(a),l=t,i=!1}function h(){i||(i=!0,n=requestAnimationFrame(c))}var u=!!o&&{passive:!0,capture:!1};return t.addEventListener("scroll",h,u),c(),{destroy:function(){cancelAnimationFrame(n),t.removeEventListener("scroll",h,u)}}}function o(t){return t===Object(t)?t:{down:t,up:t}}function s(t,n){n=n||{},Object.assign(this,s.options,n),this.classes=Object.assign({},s.options.classes,n.classes),this.elem=t,this.tolerance=o(this.tolerance),this.offset=o(this.offset),this.initialised=!1,this.frozen=!1}return s.prototype={constructor:s,init:function(){return 
s.cutsTheMustard&&!this.initialised&&(this.addClass("initial"),this.initialised=!0,setTimeout(function(t){t.scrollTracker=n(t.scroller,{offset:t.offset,tolerance:t.tolerance},t.update.bind(t))},100,this)),this},destroy:function(){this.initialised=!1,Object.keys(this.classes).forEach(this.removeClass,this),this.scrollTracker.destroy()},unpin:function(){!this.hasClass("pinned")&&this.hasClass("unpinned")||(this.addClass("unpinned"),this.removeClass("pinned"),this.onUnpin&&this.onUnpin.call(this))},pin:function(){this.hasClass("unpinned")&&(this.addClass("pinned"),this.removeClass("unpinned"),this.onPin&&this.onPin.call(this))},freeze:function(){this.frozen=!0,this.addClass("frozen")},unfreeze:function(){this.frozen=!1,this.removeClass("frozen")},top:function(){this.hasClass("top")||(this.addClass("top"),this.removeClass("notTop"),this.onTop&&this.onTop.call(this))},notTop:function(){this.hasClass("notTop")||(this.addClass("notTop"),this.removeClass("top"),this.onNotTop&&this.onNotTop.call(this))},bottom:function(){this.hasClass("bottom")||(this.addClass("bottom"),this.removeClass("notBottom"),this.onBottom&&this.onBottom.call(this))},notBottom:function(){this.hasClass("notBottom")||(this.addClass("notBottom"),this.removeClass("bottom"),this.onNotBottom&&this.onNotBottom.call(this))},shouldUnpin:function(t){return"down"===t.direction&&!t.top&&t.toleranceExceeded},shouldPin:function(t){return"up"===t.direction&&t.toleranceExceeded||t.top},addClass:function(t){this.elem.classList.add.apply(this.elem.classList,this.classes[t].split(" "))},removeClass:function(t){this.elem.classList.remove.apply(this.elem.classList,this.classes[t].split(" "))},hasClass:function(t){return this.classes[t].split(" ").every(function(t){return this.classList.contains(t)},this.elem)},update:function(t){t.isOutOfBounds||!0!==this.frozen&&(t.top?this.top():this.notTop(),t.bottom?this.bottom():this.notBottom(),this.shouldUnpin(t)?this.unpin():this.shouldPin(t)&&this.pin())}},s.options={tolerance:{up:0,down:0},offset:0,scroller:t()?window:null,classes:{frozen:"headroom--frozen",pinned:"headroom--pinned",unpinned:"headroom--unpinned",top:"headroom--top",notTop:"headroom--not-top",bottom:"headroom--bottom",notBottom:"headroom--not-bottom",initial:"headroom"}},s.cutsTheMustard=!!(t()&&function(){}.bind&&"classList"in document.documentElement&&Object.assign&&Object.keys&&requestAnimationFrame),s}); diff --git a/site_libs/quarto-nav/quarto-nav.js b/site_libs/quarto-nav/quarto-nav.js new file mode 100644 index 000000000..ebfc262e4 --- /dev/null +++ b/site_libs/quarto-nav/quarto-nav.js @@ -0,0 +1,288 @@ +const headroomChanged = new CustomEvent("quarto-hrChanged", { + detail: {}, + bubbles: true, + cancelable: false, + composed: false, +}); + +window.document.addEventListener("DOMContentLoaded", function () { + let init = false; + + // Manage the back to top button, if one is present. 
+ let lastScrollTop = window.pageYOffset || document.documentElement.scrollTop; + const scrollDownBuffer = 5; + const scrollUpBuffer = 35; + const btn = document.getElementById("quarto-back-to-top"); + const hideBackToTop = () => { + btn.style.display = "none"; + }; + const showBackToTop = () => { + btn.style.display = "inline-block"; + }; + if (btn) { + window.document.addEventListener( + "scroll", + function () { + const currentScrollTop = + window.pageYOffset || document.documentElement.scrollTop; + + // Shows and hides the button 'intelligently' as the user scrolls + if (currentScrollTop - scrollDownBuffer > lastScrollTop) { + hideBackToTop(); + lastScrollTop = currentScrollTop <= 0 ? 0 : currentScrollTop; + } else if (currentScrollTop < lastScrollTop - scrollUpBuffer) { + showBackToTop(); + lastScrollTop = currentScrollTop <= 0 ? 0 : currentScrollTop; + } + + // Show the button at the bottom, hides it at the top + if (currentScrollTop <= 0) { + hideBackToTop(); + } else if ( + window.innerHeight + currentScrollTop >= + document.body.offsetHeight + ) { + showBackToTop(); + } + }, + false + ); + } + + function throttle(func, wait) { + var timeout; + return function () { + const context = this; + const args = arguments; + const later = function () { + clearTimeout(timeout); + timeout = null; + func.apply(context, args); + }; + + if (!timeout) { + timeout = setTimeout(later, wait); + } + }; + } + + function headerOffset() { + // Set an offset if there is are fixed top navbar + const headerEl = window.document.querySelector("header.fixed-top"); + if (headerEl) { + return headerEl.clientHeight; + } else { + return 0; + } + } + + function footerOffset() { + const footerEl = window.document.querySelector("footer.footer"); + if (footerEl) { + return footerEl.clientHeight; + } else { + return 0; + } + } + + function dashboardOffset() { + const dashboardNavEl = window.document.getElementById( + "quarto-dashboard-header" + ); + if (dashboardNavEl !== null) { + return dashboardNavEl.clientHeight; + } else { + return 0; + } + } + + function updateDocumentOffsetWithoutAnimation() { + updateDocumentOffset(false); + } + + function updateDocumentOffset(animated) { + // set body offset + const topOffset = headerOffset(); + const bodyOffset = topOffset + footerOffset() + dashboardOffset(); + const bodyEl = window.document.body; + bodyEl.setAttribute("data-bs-offset", topOffset); + bodyEl.style.paddingTop = topOffset + "px"; + + // deal with sidebar offsets + const sidebars = window.document.querySelectorAll( + ".sidebar, .headroom-target" + ); + sidebars.forEach((sidebar) => { + if (!animated) { + sidebar.classList.add("notransition"); + // Remove the no transition class after the animation has time to complete + setTimeout(function () { + sidebar.classList.remove("notransition"); + }, 201); + } + + if (window.Headroom && sidebar.classList.contains("sidebar-unpinned")) { + sidebar.style.top = "0"; + sidebar.style.maxHeight = "100vh"; + } else { + sidebar.style.top = topOffset + "px"; + sidebar.style.maxHeight = "calc(100vh - " + topOffset + "px)"; + } + }); + + // allow space for footer + const mainContainer = window.document.querySelector(".quarto-container"); + if (mainContainer) { + mainContainer.style.minHeight = "calc(100vh - " + bodyOffset + "px)"; + } + + // link offset + let linkStyle = window.document.querySelector("#quarto-target-style"); + if (!linkStyle) { + linkStyle = window.document.createElement("style"); + linkStyle.setAttribute("id", "quarto-target-style"); + 
window.document.head.appendChild(linkStyle); + } + while (linkStyle.firstChild) { + linkStyle.removeChild(linkStyle.firstChild); + } + if (topOffset > 0) { + linkStyle.appendChild( + window.document.createTextNode(` + section:target::before { + content: ""; + display: block; + height: ${topOffset}px; + margin: -${topOffset}px 0 0; + }`) + ); + } + if (init) { + window.dispatchEvent(headroomChanged); + } + init = true; + } + + // initialize headroom + var header = window.document.querySelector("#quarto-header"); + if (header && window.Headroom) { + const headroom = new window.Headroom(header, { + tolerance: 5, + onPin: function () { + const sidebars = window.document.querySelectorAll( + ".sidebar, .headroom-target" + ); + sidebars.forEach((sidebar) => { + sidebar.classList.remove("sidebar-unpinned"); + }); + updateDocumentOffset(); + }, + onUnpin: function () { + const sidebars = window.document.querySelectorAll( + ".sidebar, .headroom-target" + ); + sidebars.forEach((sidebar) => { + sidebar.classList.add("sidebar-unpinned"); + }); + updateDocumentOffset(); + }, + }); + headroom.init(); + + let frozen = false; + window.quartoToggleHeadroom = function () { + if (frozen) { + headroom.unfreeze(); + frozen = false; + } else { + headroom.freeze(); + frozen = true; + } + }; + } + + window.addEventListener( + "hashchange", + function (e) { + if ( + getComputedStyle(document.documentElement).scrollBehavior !== "smooth" + ) { + window.scrollTo(0, window.pageYOffset - headerOffset()); + } + }, + false + ); + + // Observe size changed for the header + const headerEl = window.document.querySelector("header.fixed-top"); + if (headerEl && window.ResizeObserver) { + const observer = new window.ResizeObserver(() => { + setTimeout(updateDocumentOffsetWithoutAnimation, 0); + }); + observer.observe(headerEl, { + attributes: true, + childList: true, + characterData: true, + }); + } else { + window.addEventListener( + "resize", + throttle(updateDocumentOffsetWithoutAnimation, 50) + ); + } + setTimeout(updateDocumentOffsetWithoutAnimation, 250); + + // fixup index.html links if we aren't on the filesystem + if (window.location.protocol !== "file:") { + const links = window.document.querySelectorAll("a"); + for (let i = 0; i < links.length; i++) { + if (links[i].href) { + links[i].href = links[i].href.replace(/\/index\.html/, "/"); + } + } + + // Fixup any sharing links that require urls + // Append url to any sharing urls + const sharingLinks = window.document.querySelectorAll( + "a.sidebar-tools-main-item, a.quarto-navigation-tool, a.quarto-navbar-tools, a.quarto-navbar-tools-item" + ); + for (let i = 0; i < sharingLinks.length; i++) { + const sharingLink = sharingLinks[i]; + const href = sharingLink.getAttribute("href"); + if (href) { + sharingLink.setAttribute( + "href", + href.replace("|url|", window.location.href) + ); + } + } + + // Scroll the active navigation item into view, if necessary + const navSidebar = window.document.querySelector("nav#quarto-sidebar"); + if (navSidebar) { + // Find the active item + const activeItem = navSidebar.querySelector("li.sidebar-item a.active"); + if (activeItem) { + // Wait for the scroll height and height to resolve by observing size changes on the + // nav element that is scrollable + const resizeObserver = new ResizeObserver((_entries) => { + // The bottom of the element + const elBottom = activeItem.offsetTop; + const viewBottom = navSidebar.scrollTop + navSidebar.clientHeight; + + // The element height and scroll height are the same, then we are still loading + if 
(viewBottom !== navSidebar.scrollHeight) { + // Determine if the item isn't visible and scroll to it + if (elBottom >= viewBottom) { + navSidebar.scrollTop = elBottom; + } + + // stop observing now since we've completed the scroll + resizeObserver.unobserve(navSidebar); + } + }); + resizeObserver.observe(navSidebar); + } + } + } +}); diff --git a/site_libs/quarto-search/autocomplete.umd.js b/site_libs/quarto-search/autocomplete.umd.js new file mode 100644 index 000000000..ae0063aa9 --- /dev/null +++ b/site_libs/quarto-search/autocomplete.umd.js @@ -0,0 +1,3 @@ +/*! @algolia/autocomplete-js 1.11.1 | MIT License | © Algolia, Inc. and contributors | https://github.com/algolia/autocomplete */ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self)["@algolia/autocomplete-js"]={})}(this,(function(e){"use strict";function t(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function n(e){for(var n=1;n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function a(e,t){return function(e){if(Array.isArray(e))return e}(e)||function(e,t){var n=null==e?null:"undefined"!=typeof Symbol&&e[Symbol.iterator]||e["@@iterator"];if(null!=n){var r,o,i,u,a=[],l=!0,c=!1;try{if(i=(n=n.call(e)).next,0===t){if(Object(n)!==n)return;l=!1}else for(;!(l=(r=i.call(n)).done)&&(a.push(r.value),a.length!==t);l=!0);}catch(e){c=!0,o=e}finally{try{if(!l&&null!=n.return&&(u=n.return(),Object(u)!==u))return}finally{if(c)throw o}}return a}}(e,t)||c(e,t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function l(e){return function(e){if(Array.isArray(e))return s(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||c(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function c(e,t){if(e){if("string"==typeof e)return s(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);return"Object"===n&&e.constructor&&(n=e.constructor.name),"Map"===n||"Set"===n?Array.from(e):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?s(e,t):void 0}}function s(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);ne.length)&&(t=e.length);for(var n=0,r=new Array(t);ne.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function x(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function N(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:20,n=[],r=0;r=3||2===n&&r>=4||1===n&&r>=10);function i(t,n,r){if(o&&void 0!==r){var 
i=r[0].__autocomplete_algoliaCredentials,u={"X-Algolia-Application-Id":i.appId,"X-Algolia-API-Key":i.apiKey};e.apply(void 0,[t].concat(D(n),[{headers:u}]))}else e.apply(void 0,[t].concat(D(n)))}return{init:function(t,n){e("init",{appId:t,apiKey:n})},setUserToken:function(t){e("setUserToken",t)},clickedObjectIDsAfterSearch:function(){for(var e=arguments.length,t=new Array(e),n=0;n0&&i("clickedObjectIDsAfterSearch",B(t),t[0].items)},clickedObjectIDs:function(){for(var e=arguments.length,t=new Array(e),n=0;n0&&i("clickedObjectIDs",B(t),t[0].items)},clickedFilters:function(){for(var t=arguments.length,n=new Array(t),r=0;r0&&e.apply(void 0,["clickedFilters"].concat(n))},convertedObjectIDsAfterSearch:function(){for(var e=arguments.length,t=new Array(e),n=0;n0&&i("convertedObjectIDsAfterSearch",B(t),t[0].items)},convertedObjectIDs:function(){for(var e=arguments.length,t=new Array(e),n=0;n0&&i("convertedObjectIDs",B(t),t[0].items)},convertedFilters:function(){for(var t=arguments.length,n=new Array(t),r=0;r0&&e.apply(void 0,["convertedFilters"].concat(n))},viewedObjectIDs:function(){for(var e=arguments.length,t=new Array(e),n=0;n0&&t.reduce((function(e,t){var n=t.items,r=k(t,A);return[].concat(D(e),D(q(N(N({},r),{},{objectIDs:(null==n?void 0:n.map((function(e){return e.objectID})))||r.objectIDs})).map((function(e){return{items:n,payload:e}}))))}),[]).forEach((function(e){var t=e.items;return i("viewedObjectIDs",[e.payload],t)}))},viewedFilters:function(){for(var t=arguments.length,n=new Array(t),r=0;r0&&e.apply(void 0,["viewedFilters"].concat(n))}}}function F(e){var t=e.items.reduce((function(e,t){var n;return e[t.__autocomplete_indexName]=(null!==(n=e[t.__autocomplete_indexName])&&void 0!==n?n:[]).concat(t),e}),{});return Object.keys(t).map((function(e){return{index:e,items:t[e],algoliaSource:["autocomplete"]}}))}function L(e){return e.objectID&&e.__autocomplete_indexName&&e.__autocomplete_queryID}function U(e){return U="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},U(e)}function M(e){return function(e){if(Array.isArray(e))return H(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"==typeof e)return H(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return H(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function H(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n0&&z({onItemsChange:r,items:n,insights:a,state:t}))}}),0);return{name:"aa.algoliaInsightsPlugin",subscribe:function(e){var t=e.setContext,n=e.onSelect,r=e.onActive;function l(e){t({algoliaInsightsPlugin:{__algoliaSearchParameters:W({clickAnalytics:!0},e?{userToken:e}:{}),insights:a}})}u("addAlgoliaAgent","insights-plugin"),l(),u("onUserTokenChange",l),u("getUserToken",null,(function(e,t){l(t)})),n((function(e){var t=e.item,n=e.state,r=e.event,i=e.source;L(t)&&o({state:n,event:r,insights:a,item:t,insightsEvents:[W({eventName:"Item Selected"},j({item:t,items:i.getItems().filter(L)}))]})})),r((function(e){var 
t=e.item,n=e.source,r=e.state,o=e.event;L(t)&&i({state:r,event:o,insights:a,item:t,insightsEvents:[W({eventName:"Item Active"},j({item:t,items:n.getItems().filter(L)}))]})}))},onStateChange:function(e){var t=e.state;c({state:t})},__autocomplete_pluginOptions:e}}function J(e,t){var n=t;return{then:function(t,r){return J(e.then(Y(t,n,e),Y(r,n,e)),n)},catch:function(t){return J(e.catch(Y(t,n,e)),n)},finally:function(t){return t&&n.onCancelList.push(t),J(e.finally(Y(t&&function(){return n.onCancelList=[],t()},n,e)),n)},cancel:function(){n.isCanceled=!0;var e=n.onCancelList;n.onCancelList=[],e.forEach((function(e){e()}))},isCanceled:function(){return!0===n.isCanceled}}}function X(e){return J(e,{isCanceled:!1,onCancelList:[]})}function Y(e,t,n){return e?function(n){return t.isCanceled?n:e(n)}:n}function Z(e,t,n,r){if(!n)return null;if(e<0&&(null===t||null!==r&&0===t))return n+e;var o=(null===t?-1:t)+e;return o<=-1||o>=n?null===r?null:0:o}function ee(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function te(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n0},reshape:function(e){return e.sources}},e),{},{id:null!==(n=e.id)&&void 0!==n?n:d(),plugins:o,initialState:he({activeItemId:null,query:"",completion:null,collections:[],isOpen:!1,status:"idle",context:{}},e.initialState),onStateChange:function(t){var n;null===(n=e.onStateChange)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onStateChange)||void 0===n?void 0:n.call(e,t)}))},onSubmit:function(t){var n;null===(n=e.onSubmit)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onSubmit)||void 0===n?void 0:n.call(e,t)}))},onReset:function(t){var n;null===(n=e.onReset)||void 0===n||n.call(e,t),o.forEach((function(e){var n;return null===(n=e.onReset)||void 0===n?void 0:n.call(e,t)}))},getSources:function(n){return Promise.all([].concat(ye(o.map((function(e){return e.getSources}))),[e.getSources]).filter(Boolean).map((function(e){return function(e,t){var n=[];return Promise.resolve(e(t)).then((function(e){return Promise.all(e.filter((function(e){return Boolean(e)})).map((function(e){if(e.sourceId,n.includes(e.sourceId))throw new Error("[Autocomplete] The `sourceId` ".concat(JSON.stringify(e.sourceId)," is not unique."));n.push(e.sourceId);var t={getItemInputValue:function(e){return e.state.query},getItemUrl:function(){},onSelect:function(e){(0,e.setIsOpen)(!1)},onActive:O,onResolve:O};Object.keys(t).forEach((function(e){t[e].__default=!0}));var r=te(te({},t),e);return Promise.resolve(r)})))}))}(e,n)}))).then((function(e){return m(e)})).then((function(e){return e.map((function(e){return he(he({},e),{},{onSelect:function(n){e.onSelect(n),t.forEach((function(e){var t;return null===(t=e.onSelect)||void 0===t?void 0:t.call(e,n)}))},onActive:function(n){e.onActive(n),t.forEach((function(e){var t;return null===(t=e.onActive)||void 0===t?void 0:t.call(e,n)}))},onResolve:function(n){e.onResolve(n),t.forEach((function(e){var t;return null===(t=e.onResolve)||void 0===t?void 0:t.call(e,n)}))}})}))}))},navigator:he({navigate:function(e){var t=e.itemUrl;r.location.assign(t)},navigateNewTab:function(e){var t=e.itemUrl,n=r.open(t,"_blank","noopener");null==n||n.focus()},navigateNewWindow:function(e){var t=e.itemUrl;r.open(t,"_blank","noopener")}},e.navigator)})}function Se(e){return Se="function"==typeof Symbol&&"symbol"==typeof 
Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},Se(e)}function je(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Pe(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}var He,Ve,We,Ke=null,Qe=(He=-1,Ve=-1,We=void 0,function(e){var t=++He;return Promise.resolve(e).then((function(e){return We&&t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function et(e){return et="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},et(e)}var tt=["props","refresh","store"],nt=["inputElement","formElement","panelElement"],rt=["inputElement"],ot=["inputElement","maxLength"],it=["source"],ut=["item","source"];function at(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function lt(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function ft(e){var t=e.props,n=e.refresh,r=e.store,o=st(e,tt);return{getEnvironmentProps:function(e){var n=e.inputElement,o=e.formElement,i=e.panelElement;function u(e){!r.getState().isOpen&&r.pendingRequests.isEmpty()||e.target===n||!1===[o,i].some((function(t){return n=t,r=e.target,n===r||n.contains(r);var n,r}))&&(r.dispatch("blur",null),t.debug||r.pendingRequests.cancelAll())}return lt({onTouchStart:u,onMouseDown:u,onTouchMove:function(e){!1!==r.getState().isOpen&&n===t.environment.document.activeElement&&e.target!==n&&n.blur()}},st(e,nt))},getRootProps:function(e){return lt({role:"combobox","aria-expanded":r.getState().isOpen,"aria-haspopup":"listbox","aria-owns":r.getState().isOpen?r.getState().collections.map((function(e){var n=e.source;return ie(t.id,"list",n)})).join(" "):void 0,"aria-labelledby":ie(t.id,"label")},e)},getFormProps:function(e){return e.inputElement,lt({action:"",noValidate:!0,role:"search",onSubmit:function(i){var u;i.preventDefault(),t.onSubmit(lt({event:i,refresh:n,state:r.getState()},o)),r.dispatch("submit",null),null===(u=e.inputElement)||void 0===u||u.blur()},onReset:function(i){var u;i.preventDefault(),t.onReset(lt({event:i,refresh:n,state:r.getState()},o)),r.dispatch("reset",null),null===(u=e.inputElement)||void 0===u||u.focus()}},st(e,rt))},getLabelProps:function(e){return lt({htmlFor:ie(t.id,"input"),id:ie(t.id,"label")},e)},getInputProps:function(e){var i;function u(e){(t.openOnFocus||Boolean(r.getState().query))&&$e(lt({event:e,props:t,query:r.getState().completion||r.getState().query,refresh:n,store:r},o)),r.dispatch("focus",null)}var a=e||{};a.inputElement;var l=a.maxLength,c=void 0===l?512:l,s=st(a,ot),f=oe(r.getState()),p=function(e){return 
Boolean(e&&e.match(ue))}((null===(i=t.environment.navigator)||void 0===i?void 0:i.userAgent)||""),m=t.enterKeyHint||(null!=f&&f.itemUrl&&!p?"go":"search");return lt({"aria-autocomplete":"both","aria-activedescendant":r.getState().isOpen&&null!==r.getState().activeItemId?ie(t.id,"item-".concat(r.getState().activeItemId),null==f?void 0:f.source):void 0,"aria-controls":r.getState().isOpen?r.getState().collections.map((function(e){var n=e.source;return ie(t.id,"list",n)})).join(" "):void 0,"aria-labelledby":ie(t.id,"label"),value:r.getState().completion||r.getState().query,id:ie(t.id,"input"),autoComplete:"off",autoCorrect:"off",autoCapitalize:"off",enterKeyHint:m,spellCheck:"false",autoFocus:t.autoFocus,placeholder:t.placeholder,maxLength:c,type:"search",onChange:function(e){$e(lt({event:e,props:t,query:e.currentTarget.value.slice(0,c),refresh:n,store:r},o))},onKeyDown:function(e){!function(e){var t=e.event,n=e.props,r=e.refresh,o=e.store,i=Ze(e,Ge);if("ArrowUp"===t.key||"ArrowDown"===t.key){var u=function(){var e=oe(o.getState()),t=n.environment.document.getElementById(ie(n.id,"item-".concat(o.getState().activeItemId),null==e?void 0:e.source));t&&(t.scrollIntoViewIfNeeded?t.scrollIntoViewIfNeeded(!1):t.scrollIntoView(!1))},a=function(){var e=oe(o.getState());if(null!==o.getState().activeItemId&&e){var n=e.item,u=e.itemInputValue,a=e.itemUrl,l=e.source;l.onActive(Xe({event:t,item:n,itemInputValue:u,itemUrl:a,refresh:r,source:l,state:o.getState()},i))}};t.preventDefault(),!1===o.getState().isOpen&&(n.openOnFocus||Boolean(o.getState().query))?$e(Xe({event:t,props:n,query:o.getState().query,refresh:r,store:o},i)).then((function(){o.dispatch(t.key,{nextActiveItemId:n.defaultActiveItemId}),a(),setTimeout(u,0)})):(o.dispatch(t.key,{}),a(),u())}else if("Escape"===t.key)t.preventDefault(),o.dispatch(t.key,null),o.pendingRequests.cancelAll();else if("Tab"===t.key)o.dispatch("blur",null),o.pendingRequests.cancelAll();else if("Enter"===t.key){if(null===o.getState().activeItemId||o.getState().collections.every((function(e){return 0===e.items.length})))return void(n.debug||o.pendingRequests.cancelAll());t.preventDefault();var l=oe(o.getState()),c=l.item,s=l.itemInputValue,f=l.itemUrl,p=l.source;if(t.metaKey||t.ctrlKey)void 0!==f&&(p.onSelect(Xe({event:t,item:c,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},i)),n.navigator.navigateNewTab({itemUrl:f,item:c,state:o.getState()}));else if(t.shiftKey)void 0!==f&&(p.onSelect(Xe({event:t,item:c,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},i)),n.navigator.navigateNewWindow({itemUrl:f,item:c,state:o.getState()}));else if(t.altKey);else{if(void 0!==f)return p.onSelect(Xe({event:t,item:c,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},i)),void n.navigator.navigate({itemUrl:f,item:c,state:o.getState()});$e(Xe({event:t,nextState:{isOpen:!1},props:n,query:s,refresh:r,store:o},i)).then((function(){p.onSelect(Xe({event:t,item:c,itemInputValue:s,itemUrl:f,refresh:r,source:p,state:o.getState()},i))}))}}}(lt({event:e,props:t,refresh:n,store:r},o))},onFocus:u,onBlur:O,onClick:function(n){e.inputElement!==t.environment.document.activeElement||r.getState().isOpen||u(n)}},s)},getPanelProps:function(e){return lt({onMouseDown:function(e){e.preventDefault()},onMouseLeave:function(){r.dispatch("mouseleave",null)}},e)},getListProps:function(e){var n=e||{},r=n.source,o=st(n,it);return lt({role:"listbox","aria-labelledby":ie(t.id,"label"),id:ie(t.id,"list",r)},o)},getItemProps:function(e){var 
i=e.item,u=e.source,a=st(e,ut);return lt({id:ie(t.id,"item-".concat(i.__autocomplete_id),u),role:"option","aria-selected":r.getState().activeItemId===i.__autocomplete_id,onMouseMove:function(e){if(i.__autocomplete_id!==r.getState().activeItemId){r.dispatch("mousemove",i.__autocomplete_id);var t=oe(r.getState());if(null!==r.getState().activeItemId&&t){var u=t.item,a=t.itemInputValue,l=t.itemUrl,c=t.source;c.onActive(lt({event:e,item:u,itemInputValue:a,itemUrl:l,refresh:n,source:c,state:r.getState()},o))}}},onMouseDown:function(e){e.preventDefault()},onClick:function(e){var a=u.getItemInputValue({item:i,state:r.getState()}),l=u.getItemUrl({item:i,state:r.getState()});(l?Promise.resolve():$e(lt({event:e,nextState:{isOpen:!1},props:t,query:a,refresh:n,store:r},o))).then((function(){u.onSelect(lt({event:e,item:i,itemInputValue:a,itemUrl:l,refresh:n,source:u,state:r.getState()},o))}))}},a)}}}function pt(e){return pt="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},pt(e)}function mt(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function vt(e){for(var t=1;t=5&&((o||!e&&5===r)&&(u.push(r,0,o,n),r=6),e&&(u.push(r,e,0,n),r=6)),o=""},l=0;l"===t?(r=1,o=""):o=t+o[0]:i?t===i?i="":o+=t:'"'===t||"'"===t?i=t:">"===t?(a(),r=1):r&&("="===t?(r=5,n=o,o=""):"/"===t&&(r<5||">"===e[l][c+1])?(a(),3===r&&(u=u[0]),r=u,(u=u[0]).push(2,0,r),r=0):" "===t||"\t"===t||"\n"===t||"\r"===t?(a(),r=2):o+=t),3===r&&"!--"===o&&(r=4,u=u[0])}return a(),u}(e)),t),arguments,[])).length>1?t:t[0]}var kt=function(e){var t=e.environment,n=t.document.createElementNS("http://www.w3.org/2000/svg","svg");n.setAttribute("class","aa-ClearIcon"),n.setAttribute("viewBox","0 0 24 24"),n.setAttribute("width","18"),n.setAttribute("height","18"),n.setAttribute("fill","currentColor");var r=t.document.createElementNS("http://www.w3.org/2000/svg","path");return r.setAttribute("d","M5.293 6.707l5.293 5.293-5.293 5.293c-0.391 0.391-0.391 1.024 0 1.414s1.024 0.391 1.414 0l5.293-5.293 5.293 5.293c0.391 0.391 1.024 0.391 1.414 0s0.391-1.024 0-1.414l-5.293-5.293 5.293-5.293c0.391-0.391 0.391-1.024 0-1.414s-1.024-0.391-1.414 0l-5.293 5.293-5.293-5.293c-0.391-0.391-1.024-0.391-1.414 0s-0.391 1.024 0 1.414z"),n.appendChild(r),n};function xt(e,t){if("string"==typeof t){var n=e.document.querySelector(t);return"The element ".concat(JSON.stringify(t)," is not in the document."),n}return t}function Nt(){for(var e=arguments.length,t=new Array(e),n=0;n2&&(u.children=arguments.length>3?Jt.call(arguments,2):n),"function"==typeof e&&null!=e.defaultProps)for(i in e.defaultProps)void 0===u[i]&&(u[i]=e.defaultProps[i]);return sn(e,u,r,o,null)}function sn(e,t,n,r,o){var i={type:e,props:t,key:n,ref:r,__k:null,__:null,__b:0,__e:null,__d:void 0,__c:null,__h:null,constructor:void 0,__v:null==o?++Yt:o};return null==o&&null!=Xt.vnode&&Xt.vnode(i),i}function fn(e){return e.children}function pn(e,t){this.props=e,this.context=t}function mn(e,t){if(null==t)return e.__?mn(e.__,e.__.__k.indexOf(e)+1):null;for(var n;tt&&Zt.sort(nn));yn.__r=0}function bn(e,t,n,r,o,i,u,a,l,c){var s,f,p,m,v,d,y,b=r&&r.__k||on,g=b.length;for(n.__k=[],s=0;s0?sn(m.type,m.props,m.key,m.ref?m.ref:null,m.__v):m)){if(m.__=n,m.__b=n.__b+1,null===(p=b[s])||p&&m.key==p.key&&m.type===p.type)b[s]=void 
0;else for(f=0;f=0;t--)if((n=e.__k[t])&&(r=On(n)))return r;return null}function _n(e,t,n){"-"===t[0]?e.setProperty(t,null==n?"":n):e[t]=null==n?"":"number"!=typeof n||un.test(t)?n:n+"px"}function Sn(e,t,n,r,o){var i;e:if("style"===t)if("string"==typeof n)e.style.cssText=n;else{if("string"==typeof r&&(e.style.cssText=r=""),r)for(t in r)n&&t in n||_n(e.style,t,"");if(n)for(t in n)r&&n[t]===r[t]||_n(e.style,t,n[t])}else if("o"===t[0]&&"n"===t[1])i=t!==(t=t.replace(/Capture$/,"")),t=t.toLowerCase()in e?t.toLowerCase().slice(2):t.slice(2),e.l||(e.l={}),e.l[t+i]=n,n?r||e.addEventListener(t,i?Pn:jn,i):e.removeEventListener(t,i?Pn:jn,i);else if("dangerouslySetInnerHTML"!==t){if(o)t=t.replace(/xlink(H|:h)/,"h").replace(/sName$/,"s");else if("width"!==t&&"height"!==t&&"href"!==t&&"list"!==t&&"form"!==t&&"tabIndex"!==t&&"download"!==t&&t in e)try{e[t]=null==n?"":n;break e}catch(e){}"function"==typeof n||(null==n||!1===n&&"-"!==t[4]?e.removeAttribute(t):e.setAttribute(t,n))}}function jn(e){return this.l[e.type+!1](Xt.event?Xt.event(e):e)}function Pn(e){return this.l[e.type+!0](Xt.event?Xt.event(e):e)}function wn(e,t,n,r,o,i,u,a,l){var c,s,f,p,m,v,d,y,b,g,h,O,_,S,j,P=t.type;if(void 0!==t.constructor)return null;null!=n.__h&&(l=n.__h,a=t.__e=n.__e,t.__h=null,i=[a]),(c=Xt.__b)&&c(t);try{e:if("function"==typeof P){if(y=t.props,b=(c=P.contextType)&&r[c.__c],g=c?b?b.props.value:c.__:r,n.__c?d=(s=t.__c=n.__c).__=s.__E:("prototype"in P&&P.prototype.render?t.__c=s=new P(y,g):(t.__c=s=new pn(y,g),s.constructor=P,s.render=Cn),b&&b.sub(s),s.props=y,s.state||(s.state={}),s.context=g,s.__n=r,f=s.__d=!0,s.__h=[],s._sb=[]),null==s.__s&&(s.__s=s.state),null!=P.getDerivedStateFromProps&&(s.__s==s.state&&(s.__s=an({},s.__s)),an(s.__s,P.getDerivedStateFromProps(y,s.__s))),p=s.props,m=s.state,s.__v=t,f)null==P.getDerivedStateFromProps&&null!=s.componentWillMount&&s.componentWillMount(),null!=s.componentDidMount&&s.__h.push(s.componentDidMount);else{if(null==P.getDerivedStateFromProps&&y!==p&&null!=s.componentWillReceiveProps&&s.componentWillReceiveProps(y,g),!s.__e&&null!=s.shouldComponentUpdate&&!1===s.shouldComponentUpdate(y,s.__s,g)||t.__v===n.__v){for(t.__v!==n.__v&&(s.props=y,s.state=s.__s,s.__d=!1),s.__e=!1,t.__e=n.__e,t.__k=n.__k,t.__k.forEach((function(e){e&&(e.__=t)})),h=0;h0&&void 0!==arguments[0]?arguments[0]:[];return{get:function(){return e},add:function(t){var n=e[e.length-1];(null==n?void 0:n.isHighlighted)===t.isHighlighted?e[e.length-1]={value:n.value+t.value,isHighlighted:n.isHighlighted}:e.push(t)}}}(n?[{value:n,isHighlighted:!1}]:[]);return t.forEach((function(e){var t=e.split(xn);r.add({value:t[0],isHighlighted:!0}),""!==t[1]&&r.add({value:t[1],isHighlighted:!1})})),r.get()}function Tn(e){return function(e){if(Array.isArray(e))return qn(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"==typeof e)return qn(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return qn(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function qn(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n",""":'"',"'":"'"},Fn=new RegExp(/\w/i),Ln=/&(amp|quot|lt|gt|#39);/g,Un=RegExp(Ln.source);function 
Mn(e,t){var n,r,o,i=e[t],u=(null===(n=e[t+1])||void 0===n?void 0:n.isHighlighted)||!0,a=(null===(r=e[t-1])||void 0===r?void 0:r.isHighlighted)||!0;return Fn.test((o=i.value)&&Un.test(o)?o.replace(Ln,(function(e){return Rn[e]})):o)||a!==u?i.isHighlighted:a}function Hn(e){return Hn="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},Hn(e)}function Vn(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function Wn(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function ur(e){return function(e){if(Array.isArray(e))return ar(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"==typeof e)return ar(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return ar(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function ar(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n0;if(!O.value.core.openOnFocus&&!t.query)return n;var r=Boolean(y.current||O.value.renderer.renderNoResults);return!n&&r||n},__autocomplete_metadata:{userAgents:br,options:e}}))})),j=f(n({collections:[],completion:null,context:{},isOpen:!1,query:"",activeItemId:null,status:"idle"},O.value.core.initialState)),P={getEnvironmentProps:O.value.renderer.getEnvironmentProps,getFormProps:O.value.renderer.getFormProps,getInputProps:O.value.renderer.getInputProps,getItemProps:O.value.renderer.getItemProps,getLabelProps:O.value.renderer.getLabelProps,getListProps:O.value.renderer.getListProps,getPanelProps:O.value.renderer.getPanelProps,getRootProps:O.value.renderer.getRootProps},w={setActiveItemId:S.value.setActiveItemId,setQuery:S.value.setQuery,setCollections:S.value.setCollections,setIsOpen:S.value.setIsOpen,setStatus:S.value.setStatus,setContext:S.value.setContext,refresh:S.value.refresh,navigator:S.value.navigator},I=m((function(){return Ct.bind(O.value.renderer.renderer.createElement)})),A=m((function(){return Gt({autocomplete:S.value,autocompleteScopeApi:w,classNames:O.value.renderer.classNames,environment:O.value.core.environment,isDetached:_.value,placeholder:O.value.core.placeholder,propGetters:P,setIsModalOpen:k,state:j.current,translations:O.value.renderer.translations})}));function E(){Ht(A.value.panel,{style:_.value?{}:yr({panelPlacement:O.value.renderer.panelPlacement,container:A.value.root,form:A.value.form,environment:O.value.core.environment})})}function D(e){j.current=e;var 
t={autocomplete:S.value,autocompleteScopeApi:w,classNames:O.value.renderer.classNames,components:O.value.renderer.components,container:O.value.renderer.container,html:I.value,dom:A.value,panelContainer:_.value?A.value.detachedContainer:O.value.renderer.panelContainer,propGetters:P,state:j.current,renderer:O.value.renderer.renderer},r=!b(e)&&!y.current&&O.value.renderer.renderNoResults||O.value.renderer.render;!function(e){var t=e.autocomplete,r=e.autocompleteScopeApi,o=e.dom,i=e.propGetters,u=e.state;Vt(o.root,i.getRootProps(n({state:u,props:t.getRootProps({})},r))),Vt(o.input,i.getInputProps(n({state:u,props:t.getInputProps({inputElement:o.input}),inputElement:o.input},r))),Ht(o.label,{hidden:"stalled"===u.status}),Ht(o.loadingIndicator,{hidden:"stalled"!==u.status}),Ht(o.clearButton,{hidden:!u.query}),Ht(o.detachedSearchButtonQuery,{textContent:u.query}),Ht(o.detachedSearchButtonPlaceholder,{hidden:Boolean(u.query)})}(t),function(e,t){var r=t.autocomplete,o=t.autocompleteScopeApi,u=t.classNames,a=t.html,l=t.dom,c=t.panelContainer,s=t.propGetters,f=t.state,p=t.components,m=t.renderer;if(f.isOpen){c.contains(l.panel)||"loading"===f.status||c.appendChild(l.panel),l.panel.classList.toggle("aa-Panel--stalled","stalled"===f.status);var v=f.collections.filter((function(e){var t=e.source,n=e.items;return t.templates.noResults||n.length>0})).map((function(e,t){var l=e.source,c=e.items;return m.createElement("section",{key:t,className:u.source,"data-autocomplete-source-id":l.sourceId},l.templates.header&&m.createElement("div",{className:u.sourceHeader},l.templates.header({components:p,createElement:m.createElement,Fragment:m.Fragment,items:c,source:l,state:f,html:a})),l.templates.noResults&&0===c.length?m.createElement("div",{className:u.sourceNoResults},l.templates.noResults({components:p,createElement:m.createElement,Fragment:m.Fragment,source:l,state:f,html:a})):m.createElement("ul",i({className:u.list},s.getListProps(n({state:f,props:r.getListProps({source:l})},o))),c.map((function(e){var t=r.getItemProps({item:e,source:l});return m.createElement("li",i({key:t.id,className:u.item},s.getItemProps(n({state:f,props:t},o))),l.templates.item({components:p,createElement:m.createElement,Fragment:m.Fragment,item:e,state:f,html:a}))}))),l.templates.footer&&m.createElement("div",{className:u.sourceFooter},l.templates.footer({components:p,createElement:m.createElement,Fragment:m.Fragment,items:c,source:l,state:f,html:a})))})),d=m.createElement(m.Fragment,null,m.createElement("div",{className:u.panelLayout},v),m.createElement("div",{className:"aa-GradientBottom"})),y=v.reduce((function(e,t){return e[t.props["data-autocomplete-source-id"]]=t,e}),{});e(n(n({children:d,state:f,sections:v,elements:y},m),{},{components:p,html:a},o),l.panel)}else c.contains(l.panel)&&c.removeChild(l.panel)}(r,t)}function C(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};l();var t=O.value.renderer,n=t.components,r=u(t,gr);g.current=qt(r,O.value.core,{components:Bt(n,(function(e){return!e.value.hasOwnProperty("__autocomplete_componentName")})),initialState:j.current},e),v(),c(),S.value.refresh().then((function(){D(j.current)}))}function k(e){requestAnimationFrame((function(){var 
t=O.value.core.environment.document.body.contains(A.value.detachedOverlay);e!==t&&(e?(O.value.core.environment.document.body.appendChild(A.value.detachedOverlay),O.value.core.environment.document.body.classList.add("aa-Detached"),A.value.input.focus()):(O.value.core.environment.document.body.removeChild(A.value.detachedOverlay),O.value.core.environment.document.body.classList.remove("aa-Detached")))}))}return a((function(){var e=S.value.getEnvironmentProps({formElement:A.value.form,panelElement:A.value.panel,inputElement:A.value.input});return Ht(O.value.core.environment,e),function(){Ht(O.value.core.environment,Object.keys(e).reduce((function(e,t){return n(n({},e),{},o({},t,void 0))}),{}))}})),a((function(){var e=_.value?O.value.core.environment.document.body:O.value.renderer.panelContainer,t=_.value?A.value.detachedOverlay:A.value.panel;return _.value&&j.current.isOpen&&k(!0),D(j.current),function(){e.contains(t)&&e.removeChild(t)}})),a((function(){var e=O.value.renderer.container;return e.appendChild(A.value.root),function(){e.removeChild(A.value.root)}})),a((function(){var e=p((function(e){D(e.state)}),0);return h.current=function(t){var n=t.state,r=t.prevState;(_.value&&r.isOpen!==n.isOpen&&k(n.isOpen),_.value||!n.isOpen||r.isOpen||E(),n.query!==r.query)&&O.value.core.environment.document.querySelectorAll(".aa-Panel--scrollable").forEach((function(e){0!==e.scrollTop&&(e.scrollTop=0)}));e({state:n})},function(){h.current=void 0}})),a((function(){var e=p((function(){var e=_.value;_.value=O.value.core.environment.matchMedia(O.value.renderer.detachedMediaQuery).matches,e!==_.value?C({}):requestAnimationFrame(E)}),20);return O.value.core.environment.addEventListener("resize",e),function(){O.value.core.environment.removeEventListener("resize",e)}})),a((function(){if(!_.value)return function(){};function e(e){A.value.detachedContainer.classList.toggle("aa-DetachedContainer--modal",e)}function t(t){e(t.matches)}var n=O.value.core.environment.matchMedia(getComputedStyle(O.value.core.environment.document.documentElement).getPropertyValue("--aa-detached-modal-media-query"));e(n.matches);var r=Boolean(n.addEventListener);return r?n.addEventListener("change",t):n.addListener(t),function(){r?n.removeEventListener("change",t):n.removeListener(t)}})),a((function(){return requestAnimationFrame(E),function(){}})),n(n({},w),{},{update:C,destroy:function(){l()}})},e.getAlgoliaFacets=function(e){var t=hr({transformResponse:function(e){return e.facetHits}}),r=e.queries.map((function(e){return n(n({},e),{},{type:"facet"})}));return t(n(n({},e),{},{queries:r}))},e.getAlgoliaResults=Or,Object.defineProperty(e,"__esModule",{value:!0})})); + diff --git a/site_libs/quarto-search/fuse.min.js b/site_libs/quarto-search/fuse.min.js new file mode 100644 index 000000000..adc28356e --- /dev/null +++ b/site_libs/quarto-search/fuse.min.js @@ -0,0 +1,9 @@ +/** + * Fuse.js v6.6.2 - Lightweight fuzzy-search (http://fusejs.io) + * + * Copyright (c) 2022 Kiro Risk (http://kiro.me) + * All Rights Reserved. 
Apache Software License 2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ +var e,t;e=this,t=function(){"use strict";function e(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function t(t){for(var n=1;ne.length)&&(t=e.length);for(var n=0,r=new Array(t);n0&&void 0!==arguments[0]?arguments[0]:1,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:3,n=new Map,r=Math.pow(10,t);return{get:function(t){var i=t.match(C).length;if(n.has(i))return n.get(i);var o=1/Math.pow(i,.5*e),c=parseFloat(Math.round(o*r)/r);return n.set(i,c),c},clear:function(){n.clear()}}}var $=function(){function e(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=t.getFn,i=void 0===n?I.getFn:n,o=t.fieldNormWeight,c=void 0===o?I.fieldNormWeight:o;r(this,e),this.norm=E(c,3),this.getFn=i,this.isCreated=!1,this.setIndexRecords()}return o(e,[{key:"setSources",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.docs=e}},{key:"setIndexRecords",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.records=e}},{key:"setKeys",value:function(){var e=this,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.keys=t,this._keysMap={},t.forEach((function(t,n){e._keysMap[t.id]=n}))}},{key:"create",value:function(){var e=this;!this.isCreated&&this.docs.length&&(this.isCreated=!0,g(this.docs[0])?this.docs.forEach((function(t,n){e._addString(t,n)})):this.docs.forEach((function(t,n){e._addObject(t,n)})),this.norm.clear())}},{key:"add",value:function(e){var t=this.size();g(e)?this._addString(e,t):this._addObject(e,t)}},{key:"removeAt",value:function(e){this.records.splice(e,1);for(var t=e,n=this.size();t2&&void 0!==arguments[2]?arguments[2]:{},r=n.getFn,i=void 0===r?I.getFn:r,o=n.fieldNormWeight,c=void 0===o?I.fieldNormWeight:o,a=new $({getFn:i,fieldNormWeight:c});return a.setKeys(e.map(_)),a.setSources(t),a.create(),a}function R(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.errors,r=void 0===n?0:n,i=t.currentLocation,o=void 0===i?0:i,c=t.expectedLocation,a=void 0===c?0:c,s=t.distance,u=void 0===s?I.distance:s,h=t.ignoreLocation,l=void 0===h?I.ignoreLocation:h,f=r/e.length;if(l)return f;var d=Math.abs(a-o);return u?f+d/u:d?1:f}function N(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:I.minMatchCharLength,n=[],r=-1,i=-1,o=0,c=e.length;o=t&&n.push([r,i]),r=-1)}return e[o-1]&&o-r>=t&&n.push([r,o-1]),n}var P=32;function W(e){for(var t={},n=0,r=e.length;n1&&void 0!==arguments[1]?arguments[1]:{},o=i.location,c=void 0===o?I.location:o,a=i.threshold,s=void 0===a?I.threshold:a,u=i.distance,h=void 0===u?I.distance:u,l=i.includeMatches,f=void 0===l?I.includeMatches:l,d=i.findAllMatches,v=void 0===d?I.findAllMatches:d,g=i.minMatchCharLength,y=void 0===g?I.minMatchCharLength:g,p=i.isCaseSensitive,m=void 0===p?I.isCaseSensitive:p,k=i.ignoreLocation,M=void 0===k?I.ignoreLocation:k;if(r(this,e),this.options={location:c,threshold:s,distance:h,includeMatches:f,findAllMatches:v,minMatchCharLength:y,isCaseSensitive:m,ignoreLocation:M},this.pattern=m?t:t.toLowerCase(),this.chunks=[],this.pattern.length){var b=function(e,t){n.chunks.push({pattern:e,alphabet:W(e),startIndex:t})},x=this.pattern.length;if(x>P){for(var w=0,L=x%P,S=x-L;w3&&void 0!==arguments[3]?arguments[3]:{},i=r.location,o=void 
0===i?I.location:i,c=r.distance,a=void 0===c?I.distance:c,s=r.threshold,u=void 0===s?I.threshold:s,h=r.findAllMatches,l=void 0===h?I.findAllMatches:h,f=r.minMatchCharLength,d=void 0===f?I.minMatchCharLength:f,v=r.includeMatches,g=void 0===v?I.includeMatches:v,y=r.ignoreLocation,p=void 0===y?I.ignoreLocation:y;if(t.length>P)throw new Error(w(P));for(var m,k=t.length,M=e.length,b=Math.max(0,Math.min(o,M)),x=u,L=b,S=d>1||g,_=S?Array(M):[];(m=e.indexOf(t,L))>-1;){var O=R(t,{currentLocation:m,expectedLocation:b,distance:a,ignoreLocation:p});if(x=Math.min(O,x),L=m+k,S)for(var j=0;j=z;q-=1){var B=q-1,J=n[e.charAt(B)];if(S&&(_[B]=+!!J),K[q]=(K[q+1]<<1|1)&J,F&&(K[q]|=(A[q+1]|A[q])<<1|1|A[q+1]),K[q]&$&&(C=R(t,{errors:F,currentLocation:B,expectedLocation:b,distance:a,ignoreLocation:p}))<=x){if(x=C,(L=B)<=b)break;z=Math.max(1,2*b-L)}}if(R(t,{errors:F+1,currentLocation:b,expectedLocation:b,distance:a,ignoreLocation:p})>x)break;A=K}var U={isMatch:L>=0,score:Math.max(.001,C)};if(S){var V=N(_,d);V.length?g&&(U.indices=V):U.isMatch=!1}return U}(e,n,i,{location:c+o,distance:a,threshold:s,findAllMatches:u,minMatchCharLength:h,includeMatches:r,ignoreLocation:l}),p=y.isMatch,m=y.score,k=y.indices;p&&(g=!0),v+=m,p&&k&&(d=[].concat(f(d),f(k)))}));var y={isMatch:g,score:g?v/this.chunks.length:1};return g&&r&&(y.indices=d),y}}]),e}(),z=function(){function e(t){r(this,e),this.pattern=t}return o(e,[{key:"search",value:function(){}}],[{key:"isMultiMatch",value:function(e){return D(e,this.multiRegex)}},{key:"isSingleMatch",value:function(e){return D(e,this.singleRegex)}}]),e}();function D(e,t){var n=e.match(t);return n?n[1]:null}var K=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=e===this.pattern;return{isMatch:t,score:t?0:1,indices:[0,this.pattern.length-1]}}}],[{key:"type",get:function(){return"exact"}},{key:"multiRegex",get:function(){return/^="(.*)"$/}},{key:"singleRegex",get:function(){return/^=(.*)$/}}]),n}(z),q=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=-1===e.indexOf(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-exact"}},{key:"multiRegex",get:function(){return/^!"(.*)"$/}},{key:"singleRegex",get:function(){return/^!(.*)$/}}]),n}(z),B=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=e.startsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,this.pattern.length-1]}}}],[{key:"type",get:function(){return"prefix-exact"}},{key:"multiRegex",get:function(){return/^\^"(.*)"$/}},{key:"singleRegex",get:function(){return/^\^(.*)$/}}]),n}(z),J=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=!e.startsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-prefix-exact"}},{key:"multiRegex",get:function(){return/^!\^"(.*)"$/}},{key:"singleRegex",get:function(){return/^!\^(.*)$/}}]),n}(z),U=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var 
t=e.endsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[e.length-this.pattern.length,e.length-1]}}}],[{key:"type",get:function(){return"suffix-exact"}},{key:"multiRegex",get:function(){return/^"(.*)"\$$/}},{key:"singleRegex",get:function(){return/^(.*)\$$/}}]),n}(z),V=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=!e.endsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-suffix-exact"}},{key:"multiRegex",get:function(){return/^!"(.*)"\$$/}},{key:"singleRegex",get:function(){return/^!(.*)\$$/}}]),n}(z),G=function(e){a(n,e);var t=l(n);function n(e){var i,o=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},c=o.location,a=void 0===c?I.location:c,s=o.threshold,u=void 0===s?I.threshold:s,h=o.distance,l=void 0===h?I.distance:h,f=o.includeMatches,d=void 0===f?I.includeMatches:f,v=o.findAllMatches,g=void 0===v?I.findAllMatches:v,y=o.minMatchCharLength,p=void 0===y?I.minMatchCharLength:y,m=o.isCaseSensitive,k=void 0===m?I.isCaseSensitive:m,M=o.ignoreLocation,b=void 0===M?I.ignoreLocation:M;return r(this,n),(i=t.call(this,e))._bitapSearch=new T(e,{location:a,threshold:u,distance:l,includeMatches:d,findAllMatches:g,minMatchCharLength:p,isCaseSensitive:k,ignoreLocation:b}),i}return o(n,[{key:"search",value:function(e){return this._bitapSearch.searchIn(e)}}],[{key:"type",get:function(){return"fuzzy"}},{key:"multiRegex",get:function(){return/^"(.*)"$/}},{key:"singleRegex",get:function(){return/^(.*)$/}}]),n}(z),H=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){for(var t,n=0,r=[],i=this.pattern.length;(t=e.indexOf(this.pattern,n))>-1;)n=t+i,r.push([t,n-1]);var o=!!r.length;return{isMatch:o,score:o?0:1,indices:r}}}],[{key:"type",get:function(){return"include"}},{key:"multiRegex",get:function(){return/^'"(.*)"$/}},{key:"singleRegex",get:function(){return/^'(.*)$/}}]),n}(z),Q=[K,H,B,J,V,U,q,G],X=Q.length,Y=/ +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)/;function Z(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return e.split("|").map((function(e){for(var n=e.trim().split(Y).filter((function(e){return e&&!!e.trim()})),r=[],i=0,o=n.length;i1&&void 0!==arguments[1]?arguments[1]:{},i=n.isCaseSensitive,o=void 0===i?I.isCaseSensitive:i,c=n.includeMatches,a=void 0===c?I.includeMatches:c,s=n.minMatchCharLength,u=void 0===s?I.minMatchCharLength:s,h=n.ignoreLocation,l=void 0===h?I.ignoreLocation:h,f=n.findAllMatches,d=void 0===f?I.findAllMatches:f,v=n.location,g=void 0===v?I.location:v,y=n.threshold,p=void 0===y?I.threshold:y,m=n.distance,k=void 0===m?I.distance:m;r(this,e),this.query=null,this.options={isCaseSensitive:o,includeMatches:a,minMatchCharLength:u,findAllMatches:d,ignoreLocation:l,location:g,threshold:p,distance:k},this.pattern=o?t:t.toLowerCase(),this.query=Z(this.pattern,this.options)}return o(e,[{key:"searchIn",value:function(e){var t=this.query;if(!t)return{isMatch:!1,score:1};var n=this.options,r=n.includeMatches;e=n.isCaseSensitive?e:e.toLowerCase();for(var i=0,o=[],c=0,a=0,s=t.length;a-1&&(n.refIndex=e.idx),t.matches.push(n)}}))}function ve(e,t){t.score=e.score}function ge(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.includeMatches,i=void 0===r?I.includeMatches:r,o=n.includeScore,c=void 0===o?I.includeScore:o,a=[];return i&&a.push(de),c&&a.push(ve),e.map((function(e){var n=e.idx,r={item:t[n],refIndex:n};return 
a.length&&a.forEach((function(t){t(e,r)})),r}))}var ye=function(){function e(n){var i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},o=arguments.length>2?arguments[2]:void 0;r(this,e),this.options=t(t({},I),i),this.options.useExtendedSearch,this._keyStore=new S(this.options.keys),this.setCollection(n,o)}return o(e,[{key:"setCollection",value:function(e,t){if(this._docs=e,t&&!(t instanceof $))throw new Error("Incorrect 'index' type");this._myIndex=t||F(this.options.keys,this._docs,{getFn:this.options.getFn,fieldNormWeight:this.options.fieldNormWeight})}},{key:"add",value:function(e){k(e)&&(this._docs.push(e),this._myIndex.add(e))}},{key:"remove",value:function(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:function(){return!1},t=[],n=0,r=this._docs.length;n1&&void 0!==arguments[1]?arguments[1]:{},n=t.limit,r=void 0===n?-1:n,i=this.options,o=i.includeMatches,c=i.includeScore,a=i.shouldSort,s=i.sortFn,u=i.ignoreFieldNorm,h=g(e)?g(this._docs[0])?this._searchStringList(e):this._searchObjectList(e):this._searchLogical(e);return fe(h,{ignoreFieldNorm:u}),a&&h.sort(s),y(r)&&r>-1&&(h=h.slice(0,r)),ge(h,this._docs,{includeMatches:o,includeScore:c})}},{key:"_searchStringList",value:function(e){var t=re(e,this.options),n=this._myIndex.records,r=[];return n.forEach((function(e){var n=e.v,i=e.i,o=e.n;if(k(n)){var c=t.searchIn(n),a=c.isMatch,s=c.score,u=c.indices;a&&r.push({item:n,idx:i,matches:[{score:s,value:n,norm:o,indices:u}]})}})),r}},{key:"_searchLogical",value:function(e){var t=this,n=function(e,t){var n=(arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).auto,r=void 0===n||n,i=function e(n){var i=Object.keys(n),o=ue(n);if(!o&&i.length>1&&!se(n))return e(le(n));if(he(n)){var c=o?n[ce]:i[0],a=o?n[ae]:n[c];if(!g(a))throw new Error(x(c));var s={keyId:j(c),pattern:a};return r&&(s.searcher=re(a,t)),s}var u={children:[],operator:i[0]};return i.forEach((function(t){var r=n[t];v(r)&&r.forEach((function(t){u.children.push(e(t))}))})),u};return se(e)||(e=le(e)),i(e)}(e,this.options),r=function e(n,r,i){if(!n.children){var o=n.keyId,c=n.searcher,a=t._findMatches({key:t._keyStore.get(o),value:t._myIndex.getValueForItemAtKeyId(r,o),searcher:c});return a&&a.length?[{idx:i,item:r,matches:a}]:[]}for(var s=[],u=0,h=n.children.length;u1&&void 0!==arguments[1]?arguments[1]:{},n=t.getFn,r=void 0===n?I.getFn:n,i=t.fieldNormWeight,o=void 0===i?I.fieldNormWeight:i,c=e.keys,a=e.records,s=new $({getFn:r,fieldNormWeight:o});return s.setKeys(c),s.setIndexRecords(a),s},ye.config=I,function(){ne.push.apply(ne,arguments)}(te),ye},"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).Fuse=t(); \ No newline at end of file diff --git a/site_libs/quarto-search/quarto-search.js b/site_libs/quarto-search/quarto-search.js new file mode 100644 index 000000000..aefb6a58f --- /dev/null +++ b/site_libs/quarto-search/quarto-search.js @@ -0,0 +1,1247 @@ +const kQueryArg = "q"; +const kResultsArg = "show-results"; + +// If items don't provide a URL, then both the navigator and the onSelect +// function aren't called (and therefore, the default implementation is used) +// +// We're using this sentinel URL to signal to those handlers that this +// item is a more item (along with the type) and can be handled appropriately +const kItemTypeMoreHref = "0767FDFD-0422-4E5A-BC8A-3BE11E5BBA05"; + +window.document.addEventListener("DOMContentLoaded", function (_event) { + // Ensure that search 
is available on this page. If it isn't, + // we should return early and not do anything + var searchEl = window.document.getElementById("quarto-search"); + if (!searchEl) return; + + const { autocomplete } = window["@algolia/autocomplete-js"]; + + let quartoSearchOptions = {}; + let language = {}; + const searchOptionEl = window.document.getElementById( + "quarto-search-options" + ); + if (searchOptionEl) { + const jsonStr = searchOptionEl.textContent; + quartoSearchOptions = JSON.parse(jsonStr); + language = quartoSearchOptions.language; + } + + // note the search mode + if (quartoSearchOptions.type === "overlay") { + searchEl.classList.add("type-overlay"); + } else { + searchEl.classList.add("type-textbox"); + } + + // Used to determine highlighting behavior for this page + // A `q` query param is expected when the user follows a search + // to this page + const currentUrl = new URL(window.location); + const query = currentUrl.searchParams.get(kQueryArg); + const showSearchResults = currentUrl.searchParams.get(kResultsArg); + const mainEl = window.document.querySelector("main"); + + // highlight matches on the page + if (query && mainEl) { + // perform any highlighting + highlight(escapeRegExp(query), mainEl); + + // fix up the URL to remove the q query param + const replacementUrl = new URL(window.location); + replacementUrl.searchParams.delete(kQueryArg); + window.history.replaceState({}, "", replacementUrl); + } + + // function to clear highlighting on the page when the search query changes + // (e.g. if the user edits the query or clears it) + let highlighting = true; + const resetHighlighting = (searchTerm) => { + if (mainEl && highlighting && query && searchTerm !== query) { + clearHighlight(query, mainEl); + highlighting = false; + } + }; + + // Clear search highlighting when the user scrolls sufficiently + const resetFn = () => { + resetHighlighting(""); + window.removeEventListener("quarto-hrChanged", resetFn); + window.removeEventListener("quarto-sectionChanged", resetFn); + }; + + // Register this event after the initial scrolling and settling of events + // on the page + window.addEventListener("quarto-hrChanged", resetFn); + window.addEventListener("quarto-sectionChanged", resetFn); + + // Responsively switch to overlay mode if the search is present on the navbar + // Note that switching the sidebar to overlay mode requires more coordination (not just + // the media query since we generate different HTML for sidebar overlays than we do + // for sidebar input UI) + const detachedMediaQuery = + quartoSearchOptions.type === "overlay" ?
"all" : "(max-width: 991px)"; + + // If configured, include the analytics client to send insights + const plugins = configurePlugins(quartoSearchOptions); + + let lastState = null; + const { setIsOpen, setQuery, setCollections } = autocomplete({ + container: searchEl, + detachedMediaQuery: detachedMediaQuery, + defaultActiveItemId: 0, + panelContainer: "#quarto-search-results", + panelPlacement: quartoSearchOptions["panel-placement"], + debug: false, + openOnFocus: true, + plugins, + classNames: { + form: "d-flex", + }, + placeholder: language["search-text-placeholder"], + translations: { + clearButtonTitle: language["search-clear-button-title"], + detachedCancelButtonText: language["search-detached-cancel-button-title"], + submitButtonTitle: language["search-submit-button-title"], + }, + initialState: { + query, + }, + getItemUrl({ item }) { + return item.href; + }, + onStateChange({ state }) { + // If this is a file URL, note that + + // Perhaps reset highlighting + resetHighlighting(state.query); + + // If the panel just opened, ensure the panel is positioned properly + if (state.isOpen) { + if (lastState && !lastState.isOpen) { + setTimeout(() => { + positionPanel(quartoSearchOptions["panel-placement"]); + }, 150); + } + } + + // Perhaps show the copy link + showCopyLink(state.query, quartoSearchOptions); + + lastState = state; + }, + reshape({ sources, state }) { + return sources.map((source) => { + try { + const items = source.getItems(); + + // Validate the items + validateItems(items); + + // group the items by document + const groupedItems = new Map(); + items.forEach((item) => { + const hrefParts = item.href.split("#"); + const baseHref = hrefParts[0]; + const isDocumentItem = hrefParts.length === 1; + + const items = groupedItems.get(baseHref); + if (!items) { + groupedItems.set(baseHref, [item]); + } else { + // If the href for this item matches the document + // exactly, place this item first as it is the item that represents + // the document itself + if (isDocumentItem) { + items.unshift(item); + } else { + items.push(item); + } + groupedItems.set(baseHref, items); + } + }); + + const reshapedItems = []; + let count = 1; + for (const [_key, value] of groupedItems) { + const firstItem = value[0]; + reshapedItems.push({ + ...firstItem, + type: kItemTypeDoc, + }); + + const collapseMatches = quartoSearchOptions["collapse-after"]; + const collapseCount = + typeof collapseMatches === "number" ? collapseMatches : 1; + + if (value.length > 1) { + const target = `search-more-${count}`; + const isExpanded = + state.context.expanded && + state.context.expanded.includes(target); + + const remainingCount = value.length - collapseCount; + + for (let i = 1; i < value.length; i++) { + if (collapseMatches && i === collapseCount) { + reshapedItems.push({ + target, + title: isExpanded + ? language["search-hide-matches-text"] + : remainingCount === 1 + ? 
`${remainingCount} ${language["search-more-match-text"]}` + : `${remainingCount} ${language["search-more-matches-text"]}`, + type: kItemTypeMore, + href: kItemTypeMoreHref, + }); + } + + if (isExpanded || !collapseMatches || i < collapseCount) { + reshapedItems.push({ + ...value[i], + type: kItemTypeItem, + target, + }); + } + } + } + count += 1; + } + + return { + ...source, + getItems() { + return reshapedItems; + }, + }; + } catch (error) { + // Some form of error occurred + return { + ...source, + getItems() { + return [ + { + title: error.name || "An Error Occurred While Searching", + text: + error.message || + "An unknown error occurred while attempting to perform the requested search.", + type: kItemTypeError, + }, + ]; + }, + }; + } + }); + }, + navigator: { + navigate({ itemUrl }) { + if (itemUrl !== offsetURL(kItemTypeMoreHref)) { + window.location.assign(itemUrl); + } + }, + navigateNewTab({ itemUrl }) { + if (itemUrl !== offsetURL(kItemTypeMoreHref)) { + const windowReference = window.open(itemUrl, "_blank", "noopener"); + if (windowReference) { + windowReference.focus(); + } + } + }, + navigateNewWindow({ itemUrl }) { + if (itemUrl !== offsetURL(kItemTypeMoreHref)) { + window.open(itemUrl, "_blank", "noopener"); + } + }, + }, + getSources({ state, setContext, setActiveItemId, refresh }) { + return [ + { + sourceId: "documents", + getItemUrl({ item }) { + if (item.href) { + return offsetURL(item.href); + } else { + return undefined; + } + }, + onSelect({ + item, + state, + setContext, + setIsOpen, + setActiveItemId, + refresh, + }) { + if (item.type === kItemTypeMore) { + toggleExpanded(item, state, setContext, setActiveItemId, refresh); + + // Toggle more + setIsOpen(true); + } + }, + getItems({ query }) { + if (query === null || query === "") { + return []; + } + + const limit = quartoSearchOptions.limit; + if (quartoSearchOptions.algolia) { + return algoliaSearch(query, limit, quartoSearchOptions.algolia); + } else { + // Fuse search options + const fuseSearchOptions = { + isCaseSensitive: false, + shouldSort: true, + minMatchCharLength: 2, + limit: limit, + }; + + return readSearchData().then(function (fuse) { + return fuseSearch(query, fuse, fuseSearchOptions); + }); + } + }, + templates: { + noResults({ createElement }) { + const hasQuery = lastState.query; + + return createElement( + "div", + { + class: `quarto-search-no-results${ + hasQuery ? 
"" : " no-query" + }`, + }, + language["search-no-results-text"] + ); + }, + header({ items, createElement }) { + // count the documents + const count = items.filter((item) => { + return item.type === kItemTypeDoc; + }).length; + + if (count > 0) { + return createElement( + "div", + { class: "search-result-header" }, + `${count} ${language["search-matching-documents-text"]}` + ); + } else { + return createElement( + "div", + { class: "search-result-header-no-results" }, + `` + ); + } + }, + footer({ _items, createElement }) { + if ( + quartoSearchOptions.algolia && + quartoSearchOptions.algolia["show-logo"] + ) { + const libDir = quartoSearchOptions.algolia["libDir"]; + const logo = createElement("img", { + src: offsetURL( + `${libDir}/quarto-search/search-by-algolia.svg` + ), + class: "algolia-search-logo", + }); + return createElement( + "a", + { href: "http://www.algolia.com/" }, + logo + ); + } + }, + + item({ item, createElement }) { + return renderItem( + item, + createElement, + state, + setActiveItemId, + setContext, + refresh, + quartoSearchOptions + ); + }, + }, + }, + ]; + }, + }); + + window.quartoOpenSearch = () => { + setIsOpen(false); + setIsOpen(true); + focusSearchInput(); + }; + + document.addEventListener("keyup", (event) => { + const { key } = event; + const kbds = quartoSearchOptions["keyboard-shortcut"]; + const focusedEl = document.activeElement; + + const isFormElFocused = [ + "input", + "select", + "textarea", + "button", + "option", + ].find((tag) => { + return focusedEl.tagName.toLowerCase() === tag; + }); + + if ( + kbds && + kbds.includes(key) && + !isFormElFocused && + !document.activeElement.isContentEditable + ) { + event.preventDefault(); + window.quartoOpenSearch(); + } + }); + + // Remove the labeleledby attribute since it is pointing + // to a non-existent label + if (quartoSearchOptions.type === "overlay") { + const inputEl = window.document.querySelector( + "#quarto-search .aa-Autocomplete" + ); + if (inputEl) { + inputEl.removeAttribute("aria-labelledby"); + } + } + + function throttle(func, wait) { + let waiting = false; + return function () { + if (!waiting) { + func.apply(this, arguments); + waiting = true; + setTimeout(function () { + waiting = false; + }, wait); + } + }; + } + + // If the main document scrolls dismiss the search results + // (otherwise, since they're floating in the document they can scroll with the document) + window.document.body.onscroll = throttle(() => { + // Only do this if we're not detached + // Bug #7117 + // This will happen when the keyboard is shown on ios (resulting in a scroll) + // which then closed the search UI + if (!window.matchMedia(detachedMediaQuery).matches) { + setIsOpen(false); + } + }, 50); + + if (showSearchResults) { + setIsOpen(true); + focusSearchInput(); + } +}); + +function configurePlugins(quartoSearchOptions) { + const autocompletePlugins = []; + const algoliaOptions = quartoSearchOptions.algolia; + if ( + algoliaOptions && + algoliaOptions["analytics-events"] && + algoliaOptions["search-only-api-key"] && + algoliaOptions["application-id"] + ) { + const apiKey = algoliaOptions["search-only-api-key"]; + const appId = algoliaOptions["application-id"]; + + // Aloglia insights may not be loaded because they require cookie consent + // Use deferred loading so events will start being recorded when/if consent + // is granted. 
+ const algoliaInsightsDeferredPlugin = deferredLoadPlugin(() => { + if ( + window.aa && + window["@algolia/autocomplete-plugin-algolia-insights"] + ) { + window.aa("init", { + appId, + apiKey, + useCookie: true, + }); + + const { createAlgoliaInsightsPlugin } = + window["@algolia/autocomplete-plugin-algolia-insights"]; + // Register the insights client + const algoliaInsightsPlugin = createAlgoliaInsightsPlugin({ + insightsClient: window.aa, + onItemsChange({ insights, insightsEvents }) { + const events = insightsEvents.flatMap((event) => { + // This API limits the number of items per event to 20 + const chunkSize = 20; + const itemChunks = []; + const eventItems = event.items; + for (let i = 0; i < eventItems.length; i += chunkSize) { + itemChunks.push(eventItems.slice(i, i + chunkSize)); + } + // Split the items into multiple events that can be sent + const events = itemChunks.map((items) => { + return { + ...event, + items, + }; + }); + return events; + }); + + for (const event of events) { + insights.viewedObjectIDs(event); + } + }, + }); + return algoliaInsightsPlugin; + } + }); + + // Add the plugin + autocompletePlugins.push(algoliaInsightsDeferredPlugin); + return autocompletePlugins; + } +} + +// For plugins that may not load immediately, create a wrapper +// plugin and forward events and plugin data once the plugin +// is initialized. This is useful for cases like cookie consent +// which may prevent the analytics insights event plugin from initializing +// immediately. +function deferredLoadPlugin(createPlugin) { + let plugin = undefined; + let subscribeObj = undefined; + const wrappedPlugin = () => { + if (!plugin && subscribeObj) { + plugin = createPlugin(); + if (plugin && plugin.subscribe) { + plugin.subscribe(subscribeObj); + } + } + return plugin; + }; + + return { + subscribe: (obj) => { + subscribeObj = obj; + }, + onStateChange: (obj) => { + const plugin = wrappedPlugin(); + if (plugin && plugin.onStateChange) { + plugin.onStateChange(obj); + } + }, + onSubmit: (obj) => { + const plugin = wrappedPlugin(); + if (plugin && plugin.onSubmit) { + plugin.onSubmit(obj); + } + }, + onReset: (obj) => { + const plugin = wrappedPlugin(); + if (plugin && plugin.onReset) { + plugin.onReset(obj); + } + }, + getSources: (obj) => { + const plugin = wrappedPlugin(); + if (plugin && plugin.getSources) { + return plugin.getSources(obj); + } else { + return Promise.resolve([]); + } + }, + data: (obj) => { + const plugin = wrappedPlugin(); + if (plugin && plugin.data) { + plugin.data(obj); + } + }, + }; +} + +function validateItems(items) { + // Validate the first item + if (items.length > 0) { + const item = items[0]; + const missingFields = []; + if (item.href == undefined) { + missingFields.push("href"); + } + if (item.title == undefined) { + missingFields.push("title"); + } + if (item.text == undefined) { + missingFields.push("text"); + } + + if (missingFields.length === 1) { + throw { + name: `Error: Search index is missing the ${missingFields[0]} field.`, + message: `The items being returned for this search do not include all the required fields.
Please ensure that your index items include the ${missingFields[0]} field or use index-fields in your _quarto.yml file to specify the field names.`, + }; + } else if (missingFields.length > 1) { + const missingFieldList = missingFields + .map((field) => { + return `${field}`; + }) + .join(", "); + + throw { + name: `Error: Search index is missing the following fields: ${missingFieldList}.`, + message: `The items being returned for this search do not include all the required fields. Please ensure that your index items include the following fields: ${missingFieldList}, or use index-fields in your _quarto.yml file to specify the field names.`, + }; + } + } +} + +let lastQuery = null; +function showCopyLink(query, options) { + const language = options.language; + lastQuery = query; + // Insert share icon + const inputSuffixEl = window.document.body.querySelector( + ".aa-Form .aa-InputWrapperSuffix" + ); + + if (inputSuffixEl) { + let copyButtonEl = window.document.body.querySelector( + ".aa-Form .aa-InputWrapperSuffix .aa-CopyButton" + ); + + if (copyButtonEl === null) { + copyButtonEl = window.document.createElement("button"); + copyButtonEl.setAttribute("class", "aa-CopyButton"); + copyButtonEl.setAttribute("type", "button"); + copyButtonEl.setAttribute("title", language["search-copy-link-title"]); + copyButtonEl.onmousedown = (e) => { + e.preventDefault(); + e.stopPropagation(); + }; + + const linkIcon = "bi-clipboard"; + const checkIcon = "bi-check2"; + + const shareIconEl = window.document.createElement("i"); + shareIconEl.setAttribute("class", `bi ${linkIcon}`); + copyButtonEl.appendChild(shareIconEl); + inputSuffixEl.prepend(copyButtonEl); + + const clipboard = new window.ClipboardJS(".aa-CopyButton", { + text: function (_trigger) { + const copyUrl = new URL(window.location); + copyUrl.searchParams.set(kQueryArg, lastQuery); + copyUrl.searchParams.set(kResultsArg, "1"); + return copyUrl.toString(); + }, + }); + clipboard.on("success", function (e) { + // Focus the input + + // button target + const button = e.trigger; + const icon = button.querySelector("i.bi"); + + // flash "checked" + icon.classList.add(checkIcon); + icon.classList.remove(linkIcon); + setTimeout(function () { + icon.classList.remove(checkIcon); + icon.classList.add(linkIcon); + }, 1000); + }); + } + + // If there is a query, show the link icon + if (copyButtonEl) { + if (lastQuery && options["copy-button"]) { + copyButtonEl.style.display = "flex"; + } else { + copyButtonEl.style.display = "none"; + } + } + } +} + +/* Search Index Handling */ +// create the index +var fuseIndex = undefined; +var shownWarning = false; +async function readSearchData() { + // Initialize the search index on demand + if (fuseIndex === undefined) { + if (window.location.protocol === "file:" && !shownWarning) { + window.alert( + "Search requires JavaScript features that are disabled when running in file://... URLs. In order to use search, please run this document in a web server."
+ ); + shownWarning = true; + return; + } + // create fuse index + const options = { + keys: [ + { name: "title", weight: 20 }, + { name: "section", weight: 20 }, + { name: "text", weight: 10 }, + ], + ignoreLocation: true, + threshold: 0.1, + }; + const fuse = new window.Fuse([], options); + + // fetch the main search.json + const response = await fetch(offsetURL("search.json")); + if (response.status == 200) { + return response.json().then(function (searchDocs) { + searchDocs.forEach(function (searchDoc) { + fuse.add(searchDoc); + }); + fuseIndex = fuse; + return fuseIndex; + }); + } else { + return Promise.reject( + new Error( + "Unexpected status from search index request: " + response.status + ) + ); + } + } + + return fuseIndex; +} + +function inputElement() { + return window.document.body.querySelector(".aa-Form .aa-Input"); +} + +function focusSearchInput() { + setTimeout(() => { + const inputEl = inputElement(); + if (inputEl) { + inputEl.focus(); + } + }, 50); +} + +/* Panels */ +const kItemTypeDoc = "document"; +const kItemTypeMore = "document-more"; +const kItemTypeItem = "document-item"; +const kItemTypeError = "error"; + +function renderItem( + item, + createElement, + state, + setActiveItemId, + setContext, + refresh, + quartoSearchOptions +) { + switch (item.type) { + case kItemTypeDoc: + return createDocumentCard( + createElement, + "file-richtext", + item.title, + item.section, + item.text, + item.href, + item.crumbs, + quartoSearchOptions + ); + case kItemTypeMore: + return createMoreCard( + createElement, + item, + state, + setActiveItemId, + setContext, + refresh + ); + case kItemTypeItem: + return createSectionCard( + createElement, + item.section, + item.text, + item.href + ); + case kItemTypeError: + return createErrorCard(createElement, item.title, item.text); + default: + return undefined; + } +} + +function createDocumentCard( + createElement, + icon, + title, + section, + text, + href, + crumbs, + quartoSearchOptions +) { + const iconEl = createElement("i", { + class: `bi bi-${icon} search-result-icon`, + }); + const titleEl = createElement("p", { class: "search-result-title" }, title); + const titleContents = [iconEl, titleEl]; + const showParent = quartoSearchOptions["show-item-context"]; + if (crumbs && showParent) { + let crumbsOut = undefined; + const crumbClz = ["search-result-crumbs"]; + if (showParent === "root") { + crumbsOut = crumbs.length > 1 ? crumbs[0] : undefined; + } else if (showParent === "parent") { + crumbsOut = crumbs.length > 1 ? crumbs[crumbs.length - 2] : undefined; + } else { + crumbsOut = crumbs.length > 1 ? 
crumbs.join(" > ") : undefined; + crumbClz.push("search-result-crumbs-wrap"); + } + + const crumbEl = createElement( + "p", + { class: crumbClz.join(" ") }, + crumbsOut + ); + titleContents.push(crumbEl); + } + + const titleContainerEl = createElement( + "div", + { class: "search-result-title-container" }, + titleContents + ); + + const textEls = []; + if (section) { + const sectionEl = createElement( + "p", + { class: "search-result-section" }, + section + ); + textEls.push(sectionEl); + } + const descEl = createElement("p", { + class: "search-result-text", + dangerouslySetInnerHTML: { + __html: text, + }, + }); + textEls.push(descEl); + + const textContainerEl = createElement( + "div", + { class: "search-result-text-container" }, + textEls + ); + + const containerEl = createElement( + "div", + { + class: "search-result-container", + }, + [titleContainerEl, textContainerEl] + ); + + const linkEl = createElement( + "a", + { + href: offsetURL(href), + class: "search-result-link", + }, + containerEl + ); + + const classes = ["search-result-doc", "search-item"]; + if (!section) { + classes.push("document-selectable"); + } + + return createElement( + "div", + { + class: classes.join(" "), + }, + linkEl + ); +} + +function createMoreCard( + createElement, + item, + state, + setActiveItemId, + setContext, + refresh +) { + const moreCardEl = createElement( + "div", + { + class: "search-result-more search-item", + onClick: (e) => { + // Handle expanding the sections by adding the expanded + // section to the list of expanded sections + toggleExpanded(item, state, setContext, setActiveItemId, refresh); + e.stopPropagation(); + }, + }, + item.title + ); + + return moreCardEl; +} + +function toggleExpanded(item, state, setContext, setActiveItemId, refresh) { + const expanded = state.context.expanded || []; + if (expanded.includes(item.target)) { + setContext({ + expanded: expanded.filter((target) => target !== item.target), + }); + } else { + setContext({ expanded: [...expanded, item.target] }); + } + + refresh(); + setActiveItemId(item.__autocomplete_id); +} + +function createSectionCard(createElement, section, text, href) { + const sectionEl = createSection(createElement, section, text, href); + return createElement( + "div", + { + class: "search-result-doc-section search-item", + }, + sectionEl + ); +} + +function createSection(createElement, title, text, href) { + const descEl = createElement("p", { + class: "search-result-text", + dangerouslySetInnerHTML: { + __html: text, + }, + }); + + const titleEl = createElement("p", { class: "search-result-section" }, title); + const linkEl = createElement( + "a", + { + href: offsetURL(href), + class: "search-result-link", + }, + [titleEl, descEl] + ); + return linkEl; +} + +function createErrorCard(createElement, title, text) { + const descEl = createElement("p", { + class: "search-error-text", + dangerouslySetInnerHTML: { + __html: text, + }, + }); + + const titleEl = createElement("p", { + class: "search-error-title", + dangerouslySetInnerHTML: { + __html: ` ${title}`, + }, + }); + const errorEl = createElement("div", { class: "search-error" }, [ + titleEl, + descEl, + ]); + return errorEl; +} + +function positionPanel(pos) { + const panelEl = window.document.querySelector( + "#quarto-search-results .aa-Panel" + ); + const inputEl = window.document.querySelector( + "#quarto-search .aa-Autocomplete" + ); + + if (panelEl && inputEl) { + panelEl.style.top = `${Math.round(panelEl.offsetTop)}px`; + if (pos === "start") { + panelEl.style.left = 
`${Math.round(inputEl.left)}px`; + } else { + panelEl.style.right = `${Math.round(inputEl.offsetRight)}px`; + } + } +} + +/* Highlighting */ +// highlighting functions +function highlightMatch(query, text) { + if (text) { + const start = text.toLowerCase().indexOf(query.toLowerCase()); + if (start !== -1) { + const startMark = ""; + const endMark = ""; + + const end = start + query.length; + text = + text.slice(0, start) + + startMark + + text.slice(start, end) + + endMark + + text.slice(end); + const startInfo = clipStart(text, start); + const endInfo = clipEnd( + text, + startInfo.position + startMark.length + endMark.length + ); + text = + startInfo.prefix + + text.slice(startInfo.position, endInfo.position) + + endInfo.suffix; + + return text; + } else { + return text; + } + } else { + return text; + } +} + +function clipStart(text, pos) { + const clipStart = pos - 50; + if (clipStart < 0) { + // This will just return the start of the string + return { + position: 0, + prefix: "", + }; + } else { + // We're clipping before the start of the string, walk backwards to the first space. + const spacePos = findSpace(text, pos, -1); + return { + position: spacePos.position, + prefix: "", + }; + } +} + +function clipEnd(text, pos) { + const clipEnd = pos + 200; + if (clipEnd > text.length) { + return { + position: text.length, + suffix: "", + }; + } else { + const spacePos = findSpace(text, clipEnd, 1); + return { + position: spacePos.position, + suffix: spacePos.clipped ? "…" : "", + }; + } +} + +function findSpace(text, start, step) { + let stepPos = start; + while (stepPos > -1 && stepPos < text.length) { + const char = text[stepPos]; + if (char === " " || char === "," || char === ":") { + return { + position: step === 1 ? stepPos : stepPos - step, + clipped: stepPos > 1 && stepPos < text.length, + }; + } + stepPos = stepPos + step; + } + + return { + position: stepPos - step, + clipped: false, + }; +} + +// removes highlighting as implemented by the mark tag +function clearHighlight(searchterm, el) { + const childNodes = el.childNodes; + for (let i = childNodes.length - 1; i >= 0; i--) { + const node = childNodes[i]; + if (node.nodeType === Node.ELEMENT_NODE) { + if ( + node.tagName === "MARK" && + node.innerText.toLowerCase() === searchterm.toLowerCase() + ) { + el.replaceChild(document.createTextNode(node.innerText), node); + } else { + clearHighlight(searchterm, node); + } + } + } +} + +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string +} + +// highlight matches +function highlight(term, el) { + const termRegex = new RegExp(term, "ig"); + const childNodes = el.childNodes; + + // walk back to front avoid mutating elements in front of us + for (let i = childNodes.length - 1; i >= 0; i--) { + const node = childNodes[i]; + + if (node.nodeType === Node.TEXT_NODE) { + // Search text nodes for text to highlight + const text = node.nodeValue; + + let startIndex = 0; + let matchIndex = text.search(termRegex); + if (matchIndex > -1) { + const markFragment = document.createDocumentFragment(); + while (matchIndex > -1) { + const prefix = text.slice(startIndex, matchIndex); + markFragment.appendChild(document.createTextNode(prefix)); + + const mark = document.createElement("mark"); + mark.appendChild( + document.createTextNode( + text.slice(matchIndex, matchIndex + term.length) + ) + ); + markFragment.appendChild(mark); + + startIndex = matchIndex + term.length; + matchIndex = text.slice(startIndex).search(new RegExp(term, 
"ig")); + if (matchIndex > -1) { + matchIndex = startIndex + matchIndex; + } + } + if (startIndex < text.length) { + markFragment.appendChild( + document.createTextNode(text.slice(startIndex, text.length)) + ); + } + + el.replaceChild(markFragment, node); + } + } else if (node.nodeType === Node.ELEMENT_NODE) { + // recurse through elements + highlight(term, node); + } + } +} + +/* Link Handling */ +// get the offset from this page for a given site root relative url +function offsetURL(url) { + var offset = getMeta("quarto:offset"); + return offset ? offset + url : url; +} + +// read a meta tag value +function getMeta(metaName) { + var metas = window.document.getElementsByTagName("meta"); + for (let i = 0; i < metas.length; i++) { + if (metas[i].getAttribute("name") === metaName) { + return metas[i].getAttribute("content"); + } + } + return ""; +} + +function algoliaSearch(query, limit, algoliaOptions) { + const { getAlgoliaResults } = window["@algolia/autocomplete-preset-algolia"]; + + const applicationId = algoliaOptions["application-id"]; + const searchOnlyApiKey = algoliaOptions["search-only-api-key"]; + const indexName = algoliaOptions["index-name"]; + const indexFields = algoliaOptions["index-fields"]; + const searchClient = window.algoliasearch(applicationId, searchOnlyApiKey); + const searchParams = algoliaOptions["params"]; + const searchAnalytics = !!algoliaOptions["analytics-events"]; + + return getAlgoliaResults({ + searchClient, + queries: [ + { + indexName: indexName, + query, + params: { + hitsPerPage: limit, + clickAnalytics: searchAnalytics, + ...searchParams, + }, + }, + ], + transformResponse: (response) => { + if (!indexFields) { + return response.hits.map((hit) => { + return hit.map((item) => { + return { + ...item, + text: highlightMatch(query, item.text), + }; + }); + }); + } else { + const remappedHits = response.hits.map((hit) => { + return hit.map((item) => { + const newItem = { ...item }; + ["href", "section", "title", "text", "crumbs"].forEach( + (keyName) => { + const mappedName = indexFields[keyName]; + if ( + mappedName && + item[mappedName] !== undefined && + mappedName !== keyName + ) { + newItem[keyName] = item[mappedName]; + delete newItem[mappedName]; + } + } + ); + newItem.text = highlightMatch(query, newItem.text); + return newItem; + }); + }); + return remappedHits; + } + }, + }); +} + +function fuseSearch(query, fuse, fuseOptions) { + return fuse.search(query, fuseOptions).map((result) => { + const addParam = (url, name, value) => { + const anchorParts = url.split("#"); + const baseUrl = anchorParts[0]; + const sep = baseUrl.search("\\?") > 0 ? 
"&" : "?"; + anchorParts[0] = baseUrl + sep + name + "=" + value; + return anchorParts.join("#"); + }; + + return { + title: result.item.title, + section: result.item.section, + href: addParam(result.item.href, kQueryArg, query), + text: highlightMatch(query, result.item.text), + crumbs: result.item.crumbs, + }; + }); +} diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 000000000..f0492b0cc --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,339 @@ + + + + https://timeseriesAI.github.io/tsai/models.tstplus.html + 2024-02-11T18:43:47.954Z + + + https://timeseriesAI.github.io/tsai/tutorials.html + 2024-02-11T18:43:47.790Z + + + https://timeseriesAI.github.io/tsai/models.gatedtabtransformer.html + 2024-02-11T18:43:47.738Z + + + https://timeseriesAI.github.io/tsai/models.xcmplus.html + 2024-02-11T18:43:47.682Z + + + https://timeseriesAI.github.io/tsai/models.minirocket_pytorch.html + 2024-02-11T18:43:47.602Z + + + https://timeseriesAI.github.io/tsai/models.rnn.html + 2024-02-11T18:43:47.410Z + + + https://timeseriesAI.github.io/tsai/models.xcm.html + 2024-02-11T18:43:47.078Z + + + https://timeseriesAI.github.io/tsai/models.tsitplus.html + 2024-02-11T18:43:46.962Z + + + https://timeseriesAI.github.io/tsai/data.metadatasets.html + 2024-02-11T18:43:46.850Z + + + https://timeseriesAI.github.io/tsai/models.multiinputnet.html + 2024-02-11T18:43:46.998Z + + + https://timeseriesAI.github.io/tsai/utils.html + 2024-02-11T18:43:47.750Z + + + https://timeseriesAI.github.io/tsai/models.inceptiontimeplus.html + 2024-02-11T18:43:46.398Z + + + https://timeseriesAI.github.io/tsai/data.mixed_augmentation.html + 2024-02-11T18:43:46.238Z + + + https://timeseriesAI.github.io/tsai/data.tabular.html + 2024-02-11T18:43:46.166Z + + + https://timeseriesAI.github.io/tsai/callback.predictiondynamics.html + 2024-02-11T18:43:46.042Z + + + https://timeseriesAI.github.io/tsai/models.rocket_pytorch.html + 2024-02-11T18:43:45.958Z + + + https://timeseriesAI.github.io/tsai/models.hydraplus.html + 2024-02-11T18:43:45.874Z + + + https://timeseriesAI.github.io/tsai/wandb.html + 2024-02-11T18:43:45.902Z + + + https://timeseriesAI.github.io/tsai/models.positional_encoders.html + 2024-02-11T18:43:45.666Z + + + https://timeseriesAI.github.io/tsai/models.minirocket.html + 2024-02-11T18:43:45.646Z + + + https://timeseriesAI.github.io/tsai/models.utils.html + 2024-02-11T18:43:45.734Z + + + https://timeseriesAI.github.io/tsai/optuna.html + 2024-02-11T18:43:45.518Z + + + https://timeseriesAI.github.io/tsai/models.multirocketplus.html + 2024-02-11T18:43:45.526Z + + + https://timeseriesAI.github.io/tsai/models.tabmodel.html + 2024-02-11T18:43:45.454Z + + + https://timeseriesAI.github.io/tsai/models.hydramultirocketplus.html + 2024-02-11T18:43:45.230Z + + + https://timeseriesAI.github.io/tsai/models.tsperceiver.html + 2024-02-11T18:43:45.438Z + + + https://timeseriesAI.github.io/tsai/data.mixed.html + 2024-02-11T18:43:44.986Z + + + https://timeseriesAI.github.io/tsai/data.external.html + 2024-02-11T18:43:45.406Z + + + https://timeseriesAI.github.io/tsai/metrics.html + 2024-02-11T18:43:44.746Z + + + https://timeseriesAI.github.io/tsai/models.xresnet1dplus.html + 2024-02-11T18:43:45.386Z + + + https://timeseriesAI.github.io/tsai/models.rnn_fcn.html + 2024-02-11T18:43:44.482Z + + + https://timeseriesAI.github.io/tsai/models.mlp.html + 2024-02-11T18:43:44.422Z + + + https://timeseriesAI.github.io/tsai/models.tst.html + 2024-02-11T18:43:44.370Z + + + https://timeseriesAI.github.io/tsai/data.validation.html + 2024-02-11T18:43:44.278Z + + + 
https://timeseriesAI.github.io/tsai/data.features.html + 2024-02-11T18:43:43.866Z + + + https://timeseriesAI.github.io/tsai/models.resnet.html + 2024-02-11T18:43:43.602Z + + + https://timeseriesAI.github.io/tsai/models.mwdn.html + 2024-02-11T18:43:43.286Z + + + https://timeseriesAI.github.io/tsai/models.fcn.html + 2024-02-11T18:43:43.198Z + + + https://timeseriesAI.github.io/tsai/models.gmlp.html + 2024-02-11T18:43:43.090Z + + + https://timeseriesAI.github.io/tsai/models.transformerrnnplus.html + 2024-02-11T18:43:43.962Z + + + https://timeseriesAI.github.io/tsai/models.rnn_fcnplus.html + 2024-02-11T18:43:43.038Z + + + https://timeseriesAI.github.io/tsai/models.explainability.html + 2024-02-11T18:43:42.718Z + + + https://timeseriesAI.github.io/tsai/callback.noisy_student.html + 2024-02-11T18:43:43.330Z + + + https://timeseriesAI.github.io/tsai/data.preparation.html + 2024-02-11T18:43:42.758Z + + + https://timeseriesAI.github.io/tsai/models.tabtransformer.html + 2024-02-11T18:43:42.842Z + + + https://timeseriesAI.github.io/tsai/models.multimodal.html + 2024-02-11T18:43:43.146Z + + + https://timeseriesAI.github.io/tsai/models.inceptiontime.html + 2024-02-11T18:43:43.166Z + + + https://timeseriesAI.github.io/tsai/models.patchtst.html + 2024-02-11T18:43:44.890Z + + + https://timeseriesAI.github.io/tsai/models.rescnn.html + 2024-02-11T18:43:43.342Z + + + https://timeseriesAI.github.io/tsai/models.rnnattention.html + 2024-02-11T18:43:43.486Z + + + https://timeseriesAI.github.io/tsai/models.layers.html + 2024-02-11T18:43:44.386Z + + + https://timeseriesAI.github.io/tsai/models.tabfusiontransformer.html + 2024-02-11T18:43:44.282Z + + + https://timeseriesAI.github.io/tsai/models.omniscalecnn.html + 2024-02-11T18:43:44.394Z + + + https://timeseriesAI.github.io/tsai/callback.core.html + 2024-02-11T18:43:44.718Z + + + https://timeseriesAI.github.io/tsai/models.fcnplus.html + 2024-02-11T18:43:44.486Z + + + https://timeseriesAI.github.io/tsai/models.resnetplus.html + 2024-02-11T18:43:44.590Z + + + https://timeseriesAI.github.io/tsai/callback.experimental.html + 2024-02-11T18:43:44.834Z + + + https://timeseriesAI.github.io/tsai/models.transformermodel.html + 2024-02-11T18:43:44.886Z + + + https://timeseriesAI.github.io/tsai/index.html + 2024-02-11T18:43:44.938Z + + + https://timeseriesAI.github.io/tsai/data.image.html + 2024-02-11T18:43:45.126Z + + + https://timeseriesAI.github.io/tsai/callback.mvp.html + 2024-02-11T18:43:45.398Z + + + https://timeseriesAI.github.io/tsai/models.xceptiontimeplus.html + 2024-02-11T18:43:45.498Z + + + https://timeseriesAI.github.io/tsai/models.misc.html + 2024-02-11T18:43:45.502Z + + + https://timeseriesAI.github.io/tsai/models.tssequencerplus.html + 2024-02-11T18:43:45.594Z + + + https://timeseriesAI.github.io/tsai/inference.html + 2024-02-11T18:43:45.582Z + + + https://timeseriesAI.github.io/tsai/analysis.html + 2024-02-11T18:43:45.930Z + + + https://timeseriesAI.github.io/tsai/calibration.html + 2024-02-11T18:43:45.782Z + + + https://timeseriesAI.github.io/tsai/data.core.html + 2024-02-11T18:43:46.766Z + + + https://timeseriesAI.github.io/tsai/models.xresnet1d.html + 2024-02-11T18:43:46.774Z + + + https://timeseriesAI.github.io/tsai/tslearner.html + 2024-02-11T18:43:46.082Z + + + https://timeseriesAI.github.io/tsai/models.rocket.html + 2024-02-11T18:43:46.110Z + + + https://timeseriesAI.github.io/tsai/models.xceptiontime.html + 2024-02-11T18:43:46.178Z + + + https://timeseriesAI.github.io/tsai/models.tcn.html + 2024-02-11T18:43:46.418Z + + + 
https://timeseriesAI.github.io/tsai/data.unwindowed.html + 2024-02-11T18:43:46.458Z + + + https://timeseriesAI.github.io/tsai/optimizer.html + 2024-02-11T18:43:46.502Z + + + https://timeseriesAI.github.io/tsai/losses.html + 2024-02-11T18:43:46.858Z + + + https://timeseriesAI.github.io/tsai/data.preprocessing.html + 2024-02-11T18:43:47.858Z + + + https://timeseriesAI.github.io/tsai/learner.html + 2024-02-11T18:43:47.490Z + + + https://timeseriesAI.github.io/tsai/export.html + 2024-02-11T18:43:47.342Z + + + https://timeseriesAI.github.io/tsai/models.rnnplus.html + 2024-02-11T18:43:47.514Z + + + https://timeseriesAI.github.io/tsai/models.convtranplus.html + 2024-02-11T18:43:47.690Z + + + https://timeseriesAI.github.io/tsai/models.minirocketplus_pytorch.html + 2024-02-11T18:43:47.854Z + + + https://timeseriesAI.github.io/tsai/data.transforms.html + 2024-02-11T18:43:48.278Z + + + https://timeseriesAI.github.io/tsai/models.rnnattentionplus.html + 2024-02-11T18:43:47.978Z + + diff --git a/styles.css b/styles.css new file mode 100644 index 000000000..34daa64a4 --- /dev/null +++ b/styles.css @@ -0,0 +1,21 @@ +div.description { + font-style: italic; +} + +.cell-output pre { + margin-left: 0.8rem; + margin-top: 0; + background: none; + border-left: 2px solid lightsalmon; + border-top-left-radius: 0; + border-top-right-radius: 0; + } + + .cell-output .sourceCode { + background: none; + margin-top: 0; + } + + .cell > .sourceCode { + margin-bottom: 0; + } diff --git a/tslearner.html b/tslearner.html new file mode 100644 index 000000000..22dc3735e --- /dev/null +++ b/tslearner.html @@ -0,0 +1,1792 @@ + + + + + + + + + +tsai - TSLearner + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

TSLearner

+
+ + + +
+ + + + +
+ + + +
+ + + +

A new set of time series learners with a sklearn-like API that simplifies learner creation. The following classes are included:

+
    +
  • TSClassifier
  • +
  • TSRegressor
  • +
  • TSForecaster
  • +
+
+

TSClassifier API

+
+

Commonly used arguments:

+
    +
  • X: array-like of shape (n_samples, n_steps) or (n_samples, n_features, n_steps) with the input time series samples. Internally, they will be converted to torch tensors.
  • +
  • y: array-like of shape (n_samples), (n_samples, n_outputs) or (n_samples, n_features, n_outputs) with the target. Internally, they will be converted to torch tensors. Default=None. None is used for unlabeled datasets.
  • +
  • splits: lists of indices used to split data between train and validation. Default=None. If no splits are passed, data will be split 100:0 between train and validation without shuffling.
  • +
  • tfms: item transforms that will be applied to each sample individually. Default:None.
  • +
  • batch_tfms: transforms applied to each batch. Default=None.
  • +
  • pipelines: store sklearn-type pipelines that can then be applied to pandas dataframes with transform or inverse_transform methods. Default=None.
  • +
  • bs: batch size (if batch_size is provided then batch_size will override bs). An int or a list of ints can be passed. Default=[64, 128]. If a list of ints, the first one will be used for training and the second for validation (the validation batch size can be larger since it doesn’t require backpropagation, which consumes more memory).
  • +
  • arch: indicates which architecture will be used. Alternatively, you can pass an instantiated model. Default: InceptionTimePlus.
  • +
  • arch_config: keyword arguments passed to the selected architecture. Default={}.
  • +
  • pretrained: indicates if pretrained model weights will be used. Default=False.
  • +
  • weights_path: indicates the path to the pretrained weights in case they are used.
  • +
  • loss_func: allows you to pass any loss function. Default=None (in which case CrossEntropyLossFlat() is applied).
  • +
  • opt_func: allows you to pass an optimizer. Default=Adam.
  • +
  • lr: learning rate. Default=0.001.
  • +
  • metrics: list of metrics passed to the Learner. Default=accuracy.
  • +
  • cbs: list of callbacks passed to the Learner. Default=None.
  • +
  • wd: is the default weight decay used when training the model. Default=None.
  • +
+

Less frequently used arguments:

+
    +
  • sel_vars: used to select which of the features in multivariate datasets are used. Default=None means all features are used. If necessary, a list-like of indices can be used (e.g. [0, 3, 5]).
  • +
  • sel_steps: used to select the steps used. Default=None means all steps are used. If necessary, a list-like of indices can be used (e.g. slice(-50, None) will select the last 50 steps from each time series).
  • +
  • s_cat_idxs: list of indices for static categorical variables
  • +
  • s_cat_embeddings: list of num_embeddings for each static categorical variable
  • +
  • s_cat_embedding_dims: list of embedding dimensions for each static categorical variable
  • +
  • s_cont_idxs: list of indices for static continuous variables
  • +
  • o_cat_idxs: list of indices for observed categorical variables
  • +
  • o_cat_embeddings: list of num_embeddings for each observed categorical variable
  • +
  • o_cat_embedding_dims: list of embedding dimensions for each observed categorical variable
  • +
  • o_cont_idxs: list of indices for observed continuous variables
  • +
  • patch_len: Number of time steps in each patch.
  • +
  • patch_stride: Stride of the patch.
  • +
  • fusion_layers: list of layer dimensions for the fusion MLP
  • +
  • fusion_act: activation function for the fusion MLP
  • +
  • fusion_dropout: dropout probability for the fusion MLP
  • +
  • fusion_use_bn: boolean indicating whether to use batch normalization in the fusion MLP
  • +
  • weights: indicates a sample weight per instance. Used to pass a probability to the train dataloader sampler. Samples with more weight will be selected more often during training.
  • +
  • partial_n: randomly selects a partial quantity of the data at each epoch. Used to reduce the training set size (for example, for testing purposes). An int or a float can be used.
  • +
  • vocab: vocabulary used to transform the target. Only required when the transform is not performed by a dataloader tfm (i.e. external transforms are used).
  • +
  • train_metrics: flag used to display metrics in the training set. Defaults to False.
  • +
  • valid_metrics: flag used to display metrics in the validation set. Defaults to True.
  • +
  • inplace: indicates whether tfms are applied during instantiation or on-the-fly. Default=True, which means that tfms will be applied during instantiation. This results in a faster training, but it can only be used when data fits in memory. Otherwise set it to False.
  • +
  • shuffle_train: indicates whether to shuffle the training set every time the dataloader is fully read/iterated or not. This doesn’t have an impact on the validation set which is never shuffled. Default=True.
  • +
  • drop_last: if True the last incomplete training batch is dropped (thus ensuring training batches of equal size). This doesn’t have an impact on the validation set where samples are never dropped. Default=True.
  • +
  • num_workers: how many subprocesses to use for data loading (int). 0 means that the data will be loaded in the main process. Default=0.
  • +
  • do_setup: indicates if the Pipeline.setup method should be called during initialization. Default=True.
  • +
  • device: Defaults to default_device(), which is CUDA when available. You can specify the device as torch.device('cpu').
  • +
  • seed: Set to an int to ensure reproducibility. Default=None.
  • +
  • verbose: controls the verbosity when fitting and predicting.
  • +
  • exclude_head: indicates whether the head of the pretrained model needs to be removed or not. Default=True.
  • +
  • cut: indicates the position where the pretrained model head needs to be cut. Defaults=-1.
  • +
  • init: allows you to set to None (no initialization applied), set to True (in which case nn.init.kaiming_normal_ will be applied) or pass an initialization. Default=None.
  • +
  • splitter: To do transfer learning, you need to pass a splitter to Learner. This should be a function taking the model and returning a collection of parameter groups, e.g. a list of lists of parameters. Default=trainable_params. If the model has a backbone and a head, it will then be split in those 2 groups.
  • +
  • path and model_dir: are used to save and/or load models. Often path will be inferred from dls, but you can override it or pass a Path object to model_dir.
  • +
  • wd_bn_bias: controls if weight decay is applied to BatchNorm layers and bias. Default=False. train_bn: controls whether BatchNorm layers are trained even when they belong to a frozen parameter group. Default=True.
  • +
  • moms: the default momentums used in Learner.fit_one_cycle. Default=(0.95, 0.85, 0.95).
  • +
+
+

source

+
+

TSClassifier

+
+
 TSClassifier (X, y=None, splits=None, tfms=None, inplace=True,
+               sel_vars=None, sel_steps=None, s_cat_idxs=None,
+               s_cat_embeddings=None, s_cat_embedding_dims=None,
+               s_cont_idxs=None, o_cat_idxs=None, o_cat_embeddings=None,
+               o_cat_embedding_dims=None, o_cont_idxs=None,
+               patch_len=None, patch_stride=None, fusion_layers=128,
+               fusion_act='relu', fusion_dropout=0.0, fusion_use_bn=True,
+               weights=None, partial_n=None, vocab=None,
+               train_metrics=False, valid_metrics=True, bs=[64, 128],
+               batch_size=None, batch_tfms=None, pipelines=None,
+               shuffle_train=True, drop_last=True, num_workers=0,
+               do_setup=True, device=None, seed=None, arch=None,
+               arch_config={}, pretrained=False, weights_path=None,
+               exclude_head=True, cut=-1, init=None, loss_func=None,
+               opt_func=<function Adam>, lr=0.001, metrics=<function
+               accuracy>, cbs=None, wd=None, wd_bn_bias=False,
+               train_bn=True, moms=(0.95, 0.85, 0.95), path='.',
+               model_dir='models', splitter=<function trainable_params>,
+               verbose=False)
+
+

Group together a model, some dls and a loss_func to handle training

+
+
from tsai.data.external import *
+from tsai.data.preprocessing import *
+from tsai.models.InceptionTimePlus import *
+
+
+
# With validation split
+X, y, splits = get_classification_data('OliveOil', split_data=False)
+tfms = [None, TSClassification()]
+batch_tfms = [TSStandardize(by_sample=True)]
+learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, metrics=accuracy, arch=InceptionTimePlus, arch_config=dict(fc_dropout=.5),
+                     train_metrics=True)
+learn.fit_one_cycle(1)
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + +
epoch | train_loss | train_accuracy | valid_loss | valid_accuracy | time
0 | 1.446255 | 0.266667 | 1.403359 | 0.300000 | 00:00
+
+
+
+
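Once the classifier has been fit, you will typically want to run inference on held-out samples. The following is a small added sketch (not part of the original notebook); it assumes the get_X_preds inference helper documented in inference.html, which is expected to return probabilities, targets and decoded predictions:
# Added sketch (hedged): predict on the validation split with the fitted learner.
# get_X_preds is assumed to return (probabilities, targets, decoded class labels).
probas, _, preds = learn.get_X_preds(X[splits[1]])
print(preds[:5])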
# Without validation split
+X, y, splits = get_classification_data('OliveOil', split_data=False)
+splits = (splits[0], None)
+tfms = [None, TSClassification()]
+batch_tfms = [TSStandardize(by_sample=True)]
+learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, metrics=accuracy, arch=InceptionTimePlus, arch_config=dict(fc_dropout=.5),
+                     train_metrics=True)
+learn.fit_one_cycle(1)
+
+ + +
+
+ + + + + + + + + + + + + + + + + +
epoch | train_loss | accuracy | time
0 | 1.286023 | 0.400000 | 00:00
+
+
+
+
num_classes = 5
+X = torch.rand(8, 2, 50)
+y = torch.randint(0, num_classes, (len(X), 3, 50))
+splits = TimeSplitter(show_plot=False)(y)
+vocab = np.arange(num_classes)
+
+fail_test = []
+for arch in all_arch_names:
+    if not "plus" in arch.lower(): continue
+    try:
+        learn = TSClassifier(X, y, splits=splits, arch=arch, metrics=accuracy, vocab=vocab, device=default_device())
+        with ContextManagers([learn.no_bar(), learn.no_logging()]):
+            learn.fit_one_cycle(1, 1e-3)
+        del learn
+        gc.collect()
+    except Exception as e:
+        fail_test.append(arch)
+        print(arch, e)
+
+test_eq(fail_test, [])
+
+
+
+
+

TSRegressor API

+
+

Commonly used arguments:

+
    +
  • X: array-like of shape (n_samples, n_steps) or (n_samples, n_features, n_steps) with the input time series samples. Internally, they will be converted to torch tensors.
  • +
  • y: array-like of shape (n_samples), (n_samples, n_outputs) or (n_samples, n_features, n_outputs) with the target. Internally, they will be converted to torch tensors. Default=None. None is used for unlabeled datasets.
  • +
  • splits: lists of indices used to split data between train and validation. Default=None. If no splits are passed, data will be split 100:0 between train and validation without shuffling.
  • +
  • tfms: item transforms that will be applied to each sample individually. Default=None.
  • +
  • batch_tfms: transforms applied to each batch. Default=None.
  • +
  • pipelines: store sklearn-type pipelines that can then be applied to pandas dataframes with transform or inverse_transform methods. Default=None.
  • +
  • bs: batch size (if batch_size is provided then batch_size will override bs). An int or a list of ints can be passed. Default=[64, 128]. If a list of ints, the first one will be used for training and the second for validation (the validation batch size can be larger since it doesn’t require backpropagation, which consumes more memory).
  • +
  • arch: indicates which architecture will be used. Alternatively, you can pass an instantiated model. Default: InceptionTimePlus.
  • +
  • arch_config: keyword arguments passed to the selected architecture. Default={}.
  • +
  • pretrained: indicates if pretrained model weights will be used. Default=False.
  • +
  • weights_path: indicates the path to the pretrained weights in case they are used.
  • +
  • loss_func: allows you to pass any loss function. Default=None (in which case CrossEntropyLossFlat() is applied).
  • +
  • opt_func: allows you to pass an optimizer. Default=Adam.
  • +
  • lr: learning rate. Default=0.001.
  • +
  • metrics: list of metrics passed to the Learner. Default=None.
  • +
  • cbs: list of callbacks passed to the Learner. Default=None.
  • +
  • wd: is the default weight decay used when training the model. Default=None.
  • +
+

Less frequently used arguments:

+
    +
  • sel_vars: used to select which of the features in multivariate datasets are used. Default=None means all features are used. If necessary, a list-like of indices can be used (e.g. [0, 3, 5]).
  • +
  • sel_steps: used to select the steps used. Default=None means all steps are used. If necessary, a list-like of indices can be used (e.g. slice(-50, None) will select the last 50 steps from each time series).
  • +
  • s_cat_idxs: list of indices for static categorical variables
  • +
  • s_cat_embeddings: list of num_embeddings for each static categorical variable
  • +
  • s_cat_embedding_dims: list of embedding dimensions for each static categorical variable
  • +
  • s_cont_idxs: list of indices for static continuous variables
  • +
  • o_cat_idxs: list of indices for observed categorical variables
  • +
  • o_cat_embeddings: list of num_embeddings for each observed categorical variable
  • +
  • o_cat_embedding_dims: list of embedding dimensions for each observed categorical variable
  • +
  • o_cont_idxs: list of indices for observed continuous variables
  • +
  • patch_len: Number of time steps in each patch.
  • +
  • patch_stride: Stride of the patch.
  • +
  • fusion_layers: list of layer dimensions for the fusion MLP
  • +
  • fusion_act: activation function for the fusion MLP
  • +
  • fusion_dropout: dropout probability for the fusion MLP
  • +
  • fusion_use_bn: boolean indicating whether to use batch normalization in the fusion MLP
  • +
  • weights: indicates a sample weight per instance. Used to pass a probability to the train dataloader sampler. Samples with more weight will be selected more often during training.
  • +
  • partial_n: randomly selects a partial quantity of the data at each epoch. Used to reduce the training set size (for example, for testing purposes). An int or a float can be used.
  • +
  • train_metrics: flag used to display metrics in the training set. Defaults to False.
  • +
  • valid_metrics: flag used to display metrics in the validation set. Defaults to True.
  • +
  • inplace: indicates whether tfms are applied during instantiation or on-the-fly. Default=True, which means that tfms will be applied during instantiation. This results in a faster training, but it can only be used when data fits in memory. Otherwise set it to False.
  • +
  • shuffle_train: indicates whether to shuffle the training set every time the dataloader is fully read/iterated or not. This doesn’t have an impact on the validation set which is never shuffled. Default=True.
  • +
  • drop_last: if True the last incomplete training batch is dropped (thus ensuring training batches of equal size). This doesn’t have an impact on the validation set where samples are never dropped. Default=True.
  • +
  • num_workers: how many subprocesses to use for data loading (int). 0 means that the data will be loaded in the main process. Default=0.
  • +
  • do_setup: indicates if the Pipeline.setup method should be called during initialization. Default=True.
  • +
  • device: Defaults to default_device(), which is CUDA when available. You can specify the device as torch.device('cpu').
  • +
  • seed: Set to an int to ensure reproducibility. Default=None.
  • +
  • verbose: controls the verbosity when fitting and predicting.
  • +
  • exclude_head: indicates whether the head of the pretrained model needs to be removed or not. Default=True.
  • +
  • cut: indicates the position where the pretrained model head needs to be cut. Defaults=-1.
  • +
  • init: allows you to set to None (no initialization applied), set to True (in which case nn.init.kaiming_normal_ will be applied) or pass an initialization. Default=None.
  • +
  • splitter: To do transfer learning, you need to pass a splitter to Learner. This should be a function taking the model and returning a collection of parameter groups, e.g. a list of lists of parameters. Default=trainable_params. If the model has a backbone and a head, it will then be split in those 2 groups.
  • +
  • path and model_dir: are used to save and/or load models. Often path will be inferred from dls, but you can override it or pass a Path object to model_dir.
  • +
  • wd_bn_bias: controls if weight decay is applied to BatchNorm layers and bias. Default=False. train_bn: controls whether BatchNorm layers are trained even when they belong to a frozen parameter group. Default=True.
  • +
  • moms: the default momentums used in Learner.fit_one_cycle. Default=(0.95, 0.85, 0.95).
  • +
+
+

source

+
+

TSRegressor

+
+
 TSRegressor (X, y=None, splits=None, tfms=None, inplace=True,
+              sel_vars=None, sel_steps=None, s_cat_idxs=None,
+              s_cat_embeddings=None, s_cat_embedding_dims=None,
+              s_cont_idxs=None, o_cat_idxs=None, o_cat_embeddings=None,
+              o_cat_embedding_dims=None, o_cont_idxs=None, patch_len=None,
+              patch_stride=None, fusion_layers=128, fusion_act='relu',
+              fusion_dropout=0.0, fusion_use_bn=True, weights=None,
+              partial_n=None, train_metrics=False, valid_metrics=True,
+              bs=[64, 128], batch_size=None, batch_tfms=None,
+              pipelines=None, shuffle_train=True, drop_last=True,
+              num_workers=0, do_setup=True, device=None, seed=None,
+              arch=None, arch_config={}, pretrained=False,
+              weights_path=None, exclude_head=True, cut=-1, init=None,
+              loss_func=None, opt_func=<function Adam>, lr=0.001,
+              metrics=None, cbs=None, wd=None, wd_bn_bias=False,
+              train_bn=True, moms=(0.95, 0.85, 0.95), path='.',
+              model_dir='models', splitter=<function trainable_params>,
+              verbose=False)
+
+

Group together a model, some dls and a loss_func to handle training

+
+
X, y, splits = get_regression_data('AppliancesEnergy', split_data=False)
+if X is not None: # This is to prevent a test fail when the data server is not available
+    X = X.astype('float32')
+    y = y.astype('float32')
+    batch_tfms = [TSStandardize()]
+    learn = TSRegressor(X, y, splits=splits, batch_tfms=batch_tfms, arch=None, metrics=mae, bs=512, train_metrics=True, device=default_device())
+    learn.fit_one_cycle(1, 1e-4)
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + +
epoch | train_loss | train_mae | valid_loss | valid_mae | time
0 | 221.239578 | 14.241582 | 208.787231 | 14.034328 | 00:00
+
+
+
+
+
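The sel_vars and sel_steps arguments described above are not exercised in the example. Here is a small added sketch (illustrative indices and random data only, not from the original notebook) of how they could be combined with TSRegressor:
# Added sketch (hedged): train on a subset of variables and only the last 30 steps.
# The variable/step indices and the random data below are illustrative only.
X = np.random.rand(16, 5, 60).astype('float32')
y = np.random.rand(16).astype('float32')
splits = TimeSplitter(show_plot=False)(y)
reg = TSRegressor(X, y, splits=splits, sel_vars=[0, 2, 4], sel_steps=slice(-30, None),
                  arch=None, metrics=mae, bs=8, device=default_device())
reg.fit_one_cycle(1, 1e-3)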
+

TSForecaster API

+
+

Commonly used arguments:

+
    +
  • X: array-like of shape (n_samples, n_steps) or (n_samples, n_features, n_steps) with the input time series samples. Internally, they will be converted to torch tensors.
  • +
  • y: array-like of shape (n_samples), (n_samples, n_outputs) or (n_samples, n_features, n_outputs) with the target. Internally, they will be converted to torch tensors. Default=None. None is used for unlabeled datasets.
  • +
  • splits: lists of indices used to split data between train and validation. Default=None. If no splits are passed, data will be split 100:0 between train and validation without shuffling.
  • +
  • tfms: item transforms that will be applied to each sample individually. Default=None.
  • +
  • batch_tfms: transforms applied to each batch. Default=None.
  • +
  • pipelines: store sklearn-type pipelines that can then be applied to pandas dataframes with transform or inverse_transform methods. Default=None.
  • +
  • bs: batch size (if batch_size is provided then batch_size will override bs). An int or a list of ints can be passed. Default=[64, 128]. If a list of ints, the first one will be used for training and the second for validation (the validation batch size can be larger since it doesn’t require backpropagation, which consumes more memory).
  • +
  • arch: indicates which architecture will be used. Alternatively, you can pass an instantiated model. Default: InceptionTimePlus.
  • +
  • arch_config: keyword arguments passed to the selected architecture. Default={}.
  • +
  • pretrained: indicates if pretrained model weights will be used. Default=False.
  • +
  • weights_path: indicates the path to the pretrained weights in case they are used.
  • +
  • loss_func: allows you to pass any loss function. Default=None (in which case CrossEntropyLossFlat() is applied).
  • +
  • opt_func: allows you to pass an optimizer. Default=Adam.
  • +
  • lr: learning rate. Default=0.001.
  • +
  • metrics: list of metrics passed to the Learner. Default=None.
  • +
  • cbs: list of callbacks passed to the Learner. Default=None.
  • +
  • wd: is the default weight decay used when training the model. Default=None.
  • +
+

Less frequently used arguments:

+
    +
  • sel_vars: used to select which of the features in multivariate datasets are used. Default=None means all features are used. If necessary, a list-like of indices can be used (e.g. [0, 3, 5]).
  • +
  • sel_steps: used to select the steps used. Default=None means all steps are used. If necessary, a list-like of indices can be used (e.g. slice(-50, None) will select the last 50 steps from each time series).
  • +
  • s_cat_idxs: list of indices for static categorical variables
  • +
  • s_cat_embeddings: list of num_embeddings for each static categorical variable
  • +
  • s_cat_embedding_dims: list of embedding dimensions for each static categorical variable
  • +
  • s_cont_idxs: list of indices for static continuous variables
  • +
  • o_cat_idxs: list of indices for observed categorical variables
  • +
  • o_cat_embeddings: list of num_embeddings for each observed categorical variable
  • +
  • o_cat_embedding_dims: list of embedding dimensions for each observed categorical variable
  • +
  • o_cont_idxs: list of indices for observed continuous variables
  • +
  • patch_len: Number of time steps in each patch.
  • +
  • patch_stride: Stride of the patch.
  • +
  • fusion_layers: list of layer dimensions for the fusion MLP
  • +
  • fusion_act: activation function for the fusion MLP
  • +
  • fusion_dropout: dropout probability for the fusion MLP
  • +
  • fusion_use_bn: boolean indicating whether to use batch normalization in the fusion MLP
  • +
  • weights: indicates a sample weight per instance. Used to pass a probability to the train dataloader sampler. Samples with more weight will be selected more often during training.
  • +
  • partial_n: randomly selects a partial quantity of the data at each epoch. Used to reduce the training set size (for example, for testing purposes). An int or a float can be used.
  • +
  • train_metrics: flag used to display metrics in the training set. Defaults to False.
  • +
  • valid_metrics: flag used to display metrics in the validation set. Defaults to True.
  • +
  • inplace: indicates whether tfms are applied during instantiation or on-the-fly. Default=True, which means that tfms will be applied during instantiation. This results in a faster training, but it can only be used when data fits in memory. Otherwise set it to False.
  • +
  • shuffle_train: indicates whether to shuffle the training set every time the dataloader is fully read/iterated or not. This doesn’t have an impact on the validation set which is never shuffled. Default=True.
  • +
  • drop_last: if True the last incomplete training batch is dropped (thus ensuring training batches of equal size). This doesn’t have an impact on the validation set where samples are never dropped. Default=True.
  • +
  • num_workers: how many subprocesses to use for data loading (int). 0 means that the data will be loaded in the main process. Default=0.
  • +
  • do_setup: indicates if the Pipeline.setup method should be called during initialization. Default=True.
  • +
  • device: Defaults to default_device(), which is CUDA when available. You can specify the device as torch.device('cpu').
  • +
  • seed: Set to an int to ensure reproducibility. Default=None.
  • +
  • verbose: controls the verbosity when fitting and predicting.
  • +
  • exclude_head: indicates whether the head of the pretrained model needs to be removed or not. Default=True.
  • +
  • cut: indicates the position where the pretrained model head needs to be cut. Defaults=-1.
  • +
  • init: allows you to set to None (no initialization applied), set to True (in which case nn.init.kaiming_normal_ will be applied) or pass an initialization. Default=None.
  • +
  • splitter: To do transfer learning, you need to pass a splitter to Learner. This should be a function taking the model and returning a collection of parameter groups, e.g. a list of lists of parameters. Default=trainable_params. If the model has a backbone and a head, it will then be split in those 2 groups.
  • +
  • path and model_dir: are used to save and/or load models. Often path will be inferred from dls, but you can override it or pass a Path object to model_dir.
  • +
  • wd_bn_bias: controls if weight decay is applied to BatchNorm layers and bias. Default=False. train_bn: controls whether BatchNorm layers are trained even when they belong to a frozen parameter group. Default=True.
  • +
  • moms: the default momentums used in Learner.fit_one_cycle. Default=(0.95, 0.85, 0.95).
  • +
+
+

source

+
+

TSForecaster

+
+
 TSForecaster (X, y=None, splits=None, tfms=None, inplace=True,
+               sel_vars=None, sel_steps=None, s_cat_idxs=None,
+               s_cat_embeddings=None, s_cat_embedding_dims=None,
+               s_cont_idxs=None, o_cat_idxs=None, o_cat_embeddings=None,
+               o_cat_embedding_dims=None, o_cont_idxs=None,
+               patch_len=None, patch_stride=None, fusion_layers=128,
+               fusion_act='relu', fusion_dropout=0.0, fusion_use_bn=True,
+               weights=None, partial_n=None, train_metrics=False,
+               valid_metrics=True, bs=[64, 128], batch_size=None,
+               batch_tfms=None, pipelines=None, shuffle_train=True,
+               drop_last=True, num_workers=0, do_setup=True, device=None,
+               seed=None, arch=None, arch_config={}, pretrained=False,
+               weights_path=None, exclude_head=True, cut=-1, init=None,
+               loss_func=None, opt_func=<function Adam>, lr=0.001,
+               metrics=None, cbs=None, wd=None, wd_bn_bias=False,
+               train_bn=True, moms=(0.95, 0.85, 0.95), path='.',
+               model_dir='models', splitter=<function trainable_params>,
+               verbose=False)
+
+

Group together a model, some dls and a loss_func to handle training

+
+
from tsai.data.preparation import *
+
+
+
ts = get_forecasting_time_series('Sunspots')
+if ts is not None: # This is to prevent a test fail when the data server is not available
+    X, y = SlidingWindowSplitter(60, horizon=1)(ts)
+    X, y = X.astype('float32'), y.astype('float32')
+    splits = TSSplitter(235)(y)
+    batch_tfms = [TSStandardize(by_var=True)]
+    learn = TSForecaster(X, y, splits=splits, batch_tfms=batch_tfms, arch=None, arch_config=dict(fc_dropout=.5), metrics=mae, bs=512,
+                         partial_n=.1, train_metrics=True, device=default_device())
+    learn.fit_one_cycle(1)
+
+
Dataset: Sunspots
+downloading data...
+...done. Path = data/forecasting/Sunspots.csv
+
+
+
+
+

+
+
+
+
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + +
epoch | train_loss | train_mae | valid_loss | valid_mae | time
0 | 4616.225098 | 53.340523 | 7969.317871 | 74.670258 | 00:00
+
+
+
+
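After fitting, the natural next step is to forecast from the most recent window. This is a small added sketch (not in the original notebook); it assumes the get_X_preds inference helper documented in inference.html and reuses the Sunspots learner fitted above:
# Added sketch (hedged): one-step-ahead forecast from the last available window.
if ts is not None:
    last_window = X[-1:]                      # most recent 60-step window, shape (1, n_vars, 60)
    _, _, preds = learn.get_X_preds(last_window)
    print(preds)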
X=torch.rand(8,2,50)
+y=torch.rand(8,1)
+splits = TimeSplitter(show_plot=False)(y)
+
+fail_test = []
+for arch in all_arch_names:
+    if not "plus" in arch.lower(): continue
+    try:
+        fcst = TSForecaster(X, y, splits=splits, arch=arch, metrics=mse, device=default_device())
+        with ContextManagers([fcst.no_bar(), fcst.no_logging()]):
+            fcst.fit_one_cycle(1, 1e-3)
+    except Exception as e:
+        fail_test.append(arch)
+        print(arch, e)
+
+test_eq(fail_test, [])
+
+ + +
+
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/tutorials.html b/tutorials.html new file mode 100644 index 000000000..2243d5bac --- /dev/null +++ b/tutorials.html @@ -0,0 +1,1278 @@ + + + + + + + + + +tsai - Tutorial notebooks + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Tutorial notebooks

+
+ + + +
+ + + + +
+ + + +
+ + + +

A number of tutorials have been created to help you get started using tsai with time series data. Please feel free to open the notebooks (you can open them in Colab if you want) and tweak them to run your own experiments.

+
+

Time series classification (using raw data)

+

I’d recommend you start with:

+ +
+

Data preparation:

+

If you need help preparing your data you may find the following tutorials useful:

+ +

These last 2 provide more details in case you need them. They explain how datasets and dataloaders are created.

+
+
+

Types of architectures:

+

Once you feel comfortable, you can start exploring different types of architectures:

+
    +
  • You can use the Time Series data preparation notebook and replace the InceptionTime architecture by any other of your choice: +
      +
    • MLPs
    • +
    • RNNs (LSTM, GRU)
    • +
    • CNNs (FCN, ResNet, XResNet)
    • +
    • Wavelet-based architectures
    • +
    • Transformers (like TST - 2020)
    • +
    • They all (except ROCKET) work in the same way, for univariate or multivariate time series.
    • +
  • +
  • How to use Transformers with Time Series? may also help you understand how to successfully apply this new type of architecture to time series.
  • +
  • You can also use Time Series Classification Benchmark to perform benchmarks with different architectures and/or configurations.
  • +
+

ROCKET (2019) is a technique used to generate 10-20k features from time series. These features are then used by a different classifier. This is the only implementation I’m aware of that uses the GPU and allows both univariate and multivariate time series. To learn about this method, which works very well in many cases, you can use the following notebook:

+ +

There are many types of classifiers, as you can see, and it’s very difficult to know in advance which one will perform well on our task. However, the ones that have consistently delivered the best results in recent benchmark studies are InceptionTime (Fawaz, 2019) and ROCKET (Dempster, 2019). Transformers, like TST (Zerveas, 2020), also show a lot of promise, but their application to time series data is so new that they have not yet been benchmarked against other architectures. I’d say these are the 3 architectures you should know well.

+
+
+
+

Time series classification (using time series images)

+

In these tutorials, I’ve also included a section on how to transform time series into images. This will allow you to then use DL vision models like ResNet50 for example. This approach works very well in some cases, even if you have limited data. You can learn about this technique in this notebook:

+ +
+
+

Time series regression

+

I’ve also included an example of how you can perform time series regression using tsai. In this case, the label will be continuous, instead of a category. But as you will see, the usage is almost identical to time series classification. You can learn more about this here:

+ +
+
+

Visualization

+

I’ve also created the PredictionDynamics callback, which allows you to visualize the model’s predictions while it’s training. It can provide you with some additional insights that may be useful to improve your model. Here’s the notebook:

+ +

I hope you will find these tutorials useful. I’m planning to add more tutorials to demonstrate new techniques, models, etc. when they become available. So stay tuned!

+ + +
+ +
+ +
+ + + + + \ No newline at end of file diff --git a/utils.html b/utils.html new file mode 100644 index 000000000..1004be99a --- /dev/null +++ b/utils.html @@ -0,0 +1,4894 @@ + + + + + + + + + +tsai - Utilities + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ +
+ + +
+ + + +
+ +
+
+

Utilities

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

General helper functions used throughout the library

+
+
+

source

+
+

random_rand

+
+
 random_rand (*d, dtype=None, out=None, seed=None)
+
+

Same as np.random.rand but with a faster random generator, dtype and seed

+
+

source

+
+
+

random_randint

+
+
 random_randint (low, high=None, size=None, dtype=<class 'int'>,
+                 endpoint=False, seed=None)
+
+

Same as np.random.randint but with a faster random generator and seed

+ ++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Argument | Type | Default | Details
low |  |  | int, lower endpoint of interval (inclusive)
high | NoneType | None | int, upper endpoint of interval (exclusive), or None for a single-argument form of low.
size | NoneType | None | int or tuple of ints, optional. Output shape.
dtype | type | int | data type of the output.
endpoint | bool | False | bool, optional. If True, high is an inclusive endpoint. If False, the range is open on the right.
seed | NoneType | None | int or None, optional. Seed for the random number generator.
+
+

source

+
+
+

random_choice

+
+
 random_choice (a, size=None, replace=True, p=None, axis=0, shuffle=True,
+                dtype=None, seed=None)
+
+

Same as np.random.choice but with a faster random generator, dtype and seed

+ ++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Argument | Type | Default | Details
a |  |  | 1-D array-like or int. The values from which to draw the samples.
size | NoneType | None | int or tuple of ints, optional. The shape of the output.
replace | bool | True | bool, optional. Whether or not to allow the same value to be drawn multiple times.
p | NoneType | None | 1-D array-like, optional. The probabilities associated with each entry in a.
axis | int | 0 | int, optional. The axis along which the samples are drawn.
shuffle | bool | True | bool, optional. Whether or not to shuffle the samples before returning them.
dtype | NoneType | None | data type of the output.
seed | NoneType | None | int or None, optional. Seed for the random number generator.
+
+
a = random_choice(10, size=(2,3,4), replace=True, p=None, seed=1)
+b = random_choice(10, size=(2,3,4), replace=True, p=None, seed=1)
+test_eq(a, b)
+c = random_choice(10, size=(2,3,4), replace=True, p=None, seed=2)
+test_ne(a, c)
+
+assert random_choice(10, size=3, replace=True, p=None).shape == (3,)
+assert random_choice(10, size=(2,3,4), replace=True, p=None).shape == (2,3,4)
+
+print(random_choice(10, size=3, replace=True, p=None))
+print(random_choice(10, size=3, replace=False, p=None))
+a = [2, 5, 4, 9, 13, 25, 56, 83, 99, 100]
+print(random_choice(a, size=3, replace=False, p=None))
+
+
[5 7 5]
+[0 1 6]
+[  4  83 100]
+
+
+
+
a = random_randint(10, 20, 100, seed=1)
+b = random_randint(10, 20, 100, seed=1)
+test_eq(a, b)
+c = random_randint(10, 20, 100, seed=2)
+test_ne(a, c)
+assert (a >= 10).all() and (a < 20).all()
+
+
+
a = random_rand(2, 3, 4, seed=123)
+b = random_rand(2, 3, 4, seed=123)
+test_eq(a, b)
+c = random_rand(2, 3, 4, seed=124)
+test_ne(a, c)
+assert (a >= 0).all() and (a < 1).all()
+
+a = random_rand(2, 3, 4)
+a_copy = a.copy()
+random_rand(2, 3, 4, out=a)
+test_ne(a, a_copy)
+
+
+

source

+
+
+

is_slice

+
+
 is_slice (o)
+
+
+

source

+
+
+

is_memmap

+
+
 is_memmap (o)
+
+
+

source

+
+
+

is_dask

+
+
 is_dask (o)
+
+
+

source

+
+
+

is_zarr

+
+
 is_zarr (o)
+
+
+

source

+
+
+

is_tensor

+
+
 is_tensor (o)
+
+
+

source

+
+
+

is_nparray

+
+
 is_nparray (o)
+
+
+
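None of these small type-checking helpers has an example above; a quick added sanity check (following the notebook’s test_eq style, assuming numpy and torch are already imported) might look like this:
# Added sketch: exercise the type-check helpers on a few obvious objects.
test_eq(is_nparray(np.array([1, 2, 3])), True)
test_eq(is_tensor(torch.ones(3)), True)
test_eq(is_slice(slice(0, 10)), True)
test_eq(is_memmap(np.array([1, 2, 3])), False)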
# ensure these folders exist for testing purposes
+fns = ['data', 'export', 'models']
+for fn in fns:
+    path = Path('.')/fn
+    if not os.path.exists(path): os.makedirs(path)
+
+
+

source

+
+
+

todtype

+
+
 todtype (dtype)
+
+
+

source

+
+
+

to3dPlusArray

+
+
 to3dPlusArray (o)
+
+
+

source

+
+
+

to3dPlusTensor

+
+
 to3dPlusTensor (o)
+
+
+

source

+
+
+

to2dPlusArray

+
+
 to2dPlusArray (o)
+
+
+

source

+
+
+

to2dPlusTensor

+
+
 to2dPlusTensor (o)
+
+
+

source

+
+
+

to3dPlus

+
+
 to3dPlus (o)
+
+
+

source

+
+
+

to2dPlus

+
+
 to2dPlus (o)
+
+
+

source

+
+
+

to1d

+
+
 to1d (o)
+
+
+

source

+
+
+

to2d

+
+
 to2d (o)
+
+
+

source

+
+
+

to3d

+
+
 to3d (o)
+
+
+

source

+
+
+

to1darray

+
+
 to1darray (o)
+
+
+

source

+
+
+

to2darray

+
+
 to2darray (o)
+
+
+

source

+
+
+

to3darray

+
+
 to3darray (o)
+
+
+

source

+
+
+

to1dtensor

+
+
 to1dtensor (o)
+
+
+

source

+
+
+

to2dtensor

+
+
 to2dtensor (o)
+
+
+

source

+
+
+

to3dtensor

+
+
 to3dtensor (o)
+
+
+

source

+
+
+

toL

+
+
 toL (o)
+
+
+

source

+
+
+

toarray

+
+
 toarray (o)
+
+
+

source

+
+
+

totensor

+
+
 totensor (o)
+
+
+
a = np.random.rand(100).astype(np.float32)
+b = torch.from_numpy(a).float()
+test_eq(totensor(a), b)
+test_eq(a, toarray(b))
+test_eq(to3dtensor(a).ndim, 3)
+test_eq(to2dtensor(a).ndim, 2)
+test_eq(to1dtensor(a).ndim, 1)
+test_eq(to3darray(b).ndim, 3)
+test_eq(to2darray(b).ndim, 2)
+test_eq(to1darray(b).ndim, 1)
+
+
+
data = np.random.rand(10, 20)
+df = pd.DataFrame(data)
+df['target'] = np.random.randint(0, 3, len(df))
+X = df[df.columns[:-1]]
+y = df['target']
+test_eq(to3darray(X).shape, (10, 1, 20))
+test_eq(toarray(y).shape, (10,))
+
+
+

source

+
+
+

get_file_size

+
+
 get_file_size (file_path:str, return_str:bool=True, decimals:int=2)
+
|            | Type | Default | Details |
|------------|------|---------|---------|
| file_path  | str  |         | path to file |
| return_str | bool | True    | True returns size in human-readable format (KB, MB, GB, …). False in bytes. |
| decimals   | int  | 2       | Number of decimals in the output |
+
+
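A quick usage sketch of get_file_size on a file that exists in this context (the exact string returned when return_str=True is an assumption):

print(get_file_size("002_utils.ipynb", return_str=True, decimals=2))  # e.g. '... KB' (human-readable string)
n_bytes = get_file_size("002_utils.ipynb", return_str=False)
assert n_bytes > 0  # size in bytes when return_str=False (assumption)
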

source

+
+
+

get_dir_size

+
+
 get_dir_size (dir_path:str, return_str:bool=True, decimals:int=2,
+               verbose:bool=False)
+
|            | Type | Default | Details |
|------------|------|---------|---------|
| dir_path   | str  |         | path to directory |
| return_str | bool | True    | True returns size in human-readable format (KB, MB, GB, …). False in bytes. |
| decimals   | int  | 2       | Number of decimals in the output |
| verbose    | bool | False   | Controls verbosity |
+
+

source

+
+
+

get_size

+
+
 get_size (o, return_str=False, decimals=2)
+
|            | Type | Default | Details |
|------------|------|---------|---------|
| o          |      |         | Any python object |
| return_str | bool | False   | True returns size in human-readable format (KB, MB, GB, …). False in bytes. |
| decimals   | int  | 2       | Number of decimals in the output |
+
+

source

+
+
+

bytes2str

+
+
 bytes2str (size_bytes:int, decimals=2)
+
|            | Type | Default | Details |
|------------|------|---------|---------|
| size_bytes | int  |         | Number of bytes |
| decimals   | int  | 2       | Number of decimals in the output |
| Returns    | str  |         |  |
+
+
a = np.random.rand(10, 5, 3)
+test_eq(get_size(a, True, 1), '1.2 KB')
+
+
+
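bytes2str is presumably the formatter used by the size helpers above. A minimal sketch (the exact output strings are assumptions):

print(bytes2str(512))           # e.g. '512.0 B'
print(bytes2str(2**20))         # e.g. '1.0 MB'
print(bytes2str(3 * 2**30, 1))  # e.g. '3.0 GB'
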

source

+
+
+

is_np_view

+
+
 is_np_view (o)
+
|   | Details |
|---|---------|
| o | a numpy array |
+
+
a = np.array([1., 2., 3.])
+test_eq(is_np_view(a), False)
+test_eq(is_np_view(a[1:]), True)
+
+
+

source

+
+
+

is_dir

+
+
 is_dir (path)
+
+
+

source

+
+
+

is_file

+
+
 is_file (path)
+
+
+
test_eq(is_file("002_utils.ipynb"), True)
+test_eq(is_file("utils.ipynb"), False)
+
+
+

source

+
+
+

delete_all_in_dir

+
+
 delete_all_in_dir (tgt_dir, exception=None)
+
+
+

source

+
+
+

reverse_dict

+
+
 reverse_dict (dictionary)
+
+
+
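A minimal sketch of reverse_dict, assuming it simply swaps keys and values:

d = {'a': 1, 'b': 2, 'c': 3}
test_eq(reverse_dict(d), {1: 'a', 2: 'b', 3: 'c'})  # assumption: values become keys and vice versa
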

source

+
+
+

is_tuple

+
+
 is_tuple (o)
+
+
+

source

+
+
+

itemify

+
+
 itemify (*o, tup_id=None)
+
+
+
a = [1, 2, 3]
+b = [4, 5, 6]
+print(itemify(a, b))
+test_eq(len(itemify(a, b)), len(a))
+a = [1, 2, 3]
+b = None
+print(itemify(a, b))
+test_eq(len(itemify(a, b)), len(a))
+a = [1, 2, 3]
+b = [4, 5, 6]
+c = None
+print(itemify(a, b, c))
+test_eq(len(itemify(a, b, c)), len(a))
+
+
[(1, 4), (2, 5), (3, 6)]
+[(1,), (2,), (3,)]
+[(1, 4), (2, 5), (3, 6)]
+
+
+
+

source

+
+
+

ifelse

+
+
 ifelse (a, b, c)
+
+

b if a is True else c

+
+
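A couple of examples of ifelse, which returns b when a is True and c otherwise:

test_eq(ifelse(True, 'apple', 'orange'), 'apple')
test_eq(ifelse(False, 'apple', 'orange'), 'orange')
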

source

+
+
+

exists

+
+
 exists (o)
+
+
+

source

+
+
+

isnone

+
+
 isnone (o)
+
+
+
a = np.array(3)
+test_eq(isnone(a), False)
+test_eq(exists(a), True)
+b = None
+test_eq(isnone(b), True)
+test_eq(exists(b), False)
+
+
+

source

+
+
+

test_eq_nan

+
+
 test_eq_nan (a, b)
+
+

test that a==b excluding nan values (valid for torch.Tensor and np.ndarray)

+
+

source

+
+
+

test_error

+
+
 test_error (error, f, *args, **kwargs)
+
+
+

source

+
+
+

test_not_ok

+
+
 test_not_ok (f, *args, **kwargs)
+
+
+

source

+
+
+

test_ok

+
+
 test_ok (f, *args, **kwargs)
+
+
+

source

+
+
+

test_type

+
+
 test_type (a, b)
+
+
+

source

+
+
+

test_not_close

+
+
 test_not_close (a, b, eps=1e-05)
+
+

test that a is not within eps of b

+
+

source

+
+
+

is_not_close

+
+
 is_not_close (a, b, eps=1e-05)
+
+

Is a not within eps of b

+
+

source

+
+
+

assert_fn

+
+
 assert_fn (*args, **kwargs)
+
+
+

source

+
+
+

test_le

+
+
 test_le (a, b)
+
+

test that a<=b

+
+

source

+
+
+

test_lt

+
+
 test_lt (a, b)
+
+

test that a<b

+
+

source

+
+
+

test_ge

+
+
 test_ge (a, b)
+
+

test that a>=b

+
+

source

+
+
+

test_gt

+
+
 test_gt (a, b)
+
+

test that a>b

+
+
test_ok(test_gt, 5, 4)
+test_not_ok(test_gt, 4, 4)
+test_ok(test_ge, 4, 4)
+test_not_ok(test_ge, 3, 4)
+
+test_ok(test_lt, 3, 4)
+test_not_ok(test_lt, 4, 4)
+test_ok(test_le, 4, 4)
+test_not_ok(test_le, 5, 4)
+
+
+
t = torch.rand(100)
+test_eq(t, t)
+test_eq_nan(t, t)
+
+
+

source

+
+
+

stack_pad

+
+
 stack_pad (o, padding_value=nan)
+
+

Converts an iterable into a numpy array, using padding if necessary

+
+

source

+
+
+

stack

+
+
 stack (o, axis=0, retain=True)
+
+
+
o = [[0,1,2], [4,5,6,7]]
+test_eq(stack_pad(o).shape, (1, 2, 4))
+test_eq(type(stack_pad(o)), np.ndarray)
+test_eq(np.isnan(stack_pad(o)).sum(), 1)
+
+
+
o = 3
+print(stack_pad(o))
+test_eq(stack_pad(o), np.array([[3.]]))
+o = [4,5]
+print(stack_pad(o))
+test_eq(stack_pad(o), np.array([[4., 5.]]))
+o = [[0,1,2], [4,5,6,7]]
+print(stack_pad(o))
+o = np.array([0, [1,2]], dtype=object)
+print(stack_pad(o))
+o = np.array([[[0], [10, 20], [100, 200, 300]], [[0, 1, 2, 3], [10, 20], [100]]], dtype=object)
+print(stack_pad(o))
+o = np.array([0, [10, 20]], dtype=object)
+print(stack_pad(o))
+
+
[[3.]]
+[[4. 5.]]
+[[[ 0.  1.  2. nan]
+  [ 4.  5.  6.  7.]]]
+[[ 0. nan]
+ [ 1.  2.]]
+[[[  0.  nan  nan  nan]
+  [ 10.  20.  nan  nan]
+  [100. 200. 300.  nan]]
+
+ [[  0.   1.   2.   3.]
+  [ 10.  20.  nan  nan]
+  [100.  nan  nan  nan]]]
+[[ 0. nan]
+ [10. 20.]]
+
+
+
+
a = np.random.rand(2, 3, 4)
+t = torch.from_numpy(a)
+test_eq_type(stack(itemify(a, tup_id=0)), a)
+test_eq_type(stack(itemify(t, tup_id=0)), t)
+
+
+

source

+
+
+

pad_sequences

+
+
 pad_sequences (o, maxlen:int=None,
+                dtype:(<class'str'>,<class'type'>)=<class
+                'numpy.float64'>, padding:str='pre', truncating:str='pre',
+                padding_value:float=nan)
+
+

Transforms an iterable with sequences into a 3d numpy array using padding or truncating sequences if necessary

|               | Type        | Default | Details |
|---------------|-------------|---------|---------|
| o             |             |         | Iterable object |
| maxlen        | int         | None    | Optional max length of the output. If None, max length of the longest individual sequence. |
| dtype         | (str, type) | float64 | Type of the output sequences. To pad sequences with variable length strings, you can use object. |
| padding       | str         | pre     | 'pre' or 'post' pad either before or after each sequence. |
| truncating    | str         | pre     | 'pre' or 'post' remove values from sequences larger than maxlen, either at the beginning or at the end of the sequences. |
| padding_value | float       | nan     | Value used for padding. |
+

This function transforms a list (of length n_samples) of sequences into a 3d numpy array of shape:

+
                          [n_samples x n_vars x seq_len]
+

seq_len is either the maxlen argument if provided, or the length of the longest sequence in the list.

+

Sequences that are shorter than seq_len are padded with padding_value until they are seq_len long.

+

Sequences longer than seq_len are truncated so that they fit the desired length.

+

The position where padding or truncation happens is determined by the arguments padding and truncating, respectively. Pre-padding or removing values from the beginning of the sequence is the default.

+

Input sequences to pad_sequences may have 1, 2 or 3 dimensions:

+
+
# 1 dim
+a1 = np.arange(6)
+a2 = np.arange(3) * 10
+a3 = np.arange(2) * 100
+o  = [a1, a2, a3]
+padded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='post', truncating='pre', padding_value=np.nan)
+test_eq(padded_o.shape, (3, 1, 4))
+padded_o
+
+
array([[[  2.,   3.,   4.,   5.]],
+
+       [[  0.,  10.,  20.,  nan]],
+
+       [[  0., 100.,  nan,  nan]]])
+
+
+
+
# 2 dim
+a1 = np.arange(12).reshape(2, 6)
+a2 = np.arange(6).reshape(2, 3) * 10
+a3 = np.arange(4).reshape(2, 2) * 100
+o  = [a1, a2, a3]
+padded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='post', truncating='pre', padding_value=np.nan)
+test_eq(padded_o.shape, (3, 2, 4))
+padded_o
+
+
array([[[  2.,   3.,   4.,   5.],
+        [  8.,   9.,  10.,  11.]],
+
+       [[  0.,  10.,  20.,  nan],
+        [ 30.,  40.,  50.,  nan]],
+
+       [[  0., 100.,  nan,  nan],
+        [200., 300.,  nan,  nan]]])
+
+
+
+
# 3 dim
+a1 = np.arange(10).reshape(1, 2, 5)
+a2 = np.arange(6).reshape(1, 2, 3) * 10
+a3 = np.arange(4).reshape(1, 2, 2) * 100
+o  = [a1, a2, a3]
+padded_o = pad_sequences(o, maxlen=None, dtype=np.float64, padding='pre', truncating='pre', padding_value=np.nan)
+test_eq(padded_o.shape, (3, 2, 5))
+padded_o
+
+
array([[[  0.,   1.,   2.,   3.,   4.],
+        [  5.,   6.,   7.,   8.,   9.]],
+
+       [[ nan,  nan,   0.,  10.,  20.],
+        [ nan,  nan,  30.,  40.,  50.]],
+
+       [[ nan,  nan,  nan,   0., 100.],
+        [ nan,  nan,  nan, 200., 300.]]])
+
+
+
+
# 3 dim
+a1 = np.arange(10).reshape(1, 2, 5)
+a2 = np.arange(6).reshape(1, 2, 3) * 10
+a3 = np.arange(4).reshape(1, 2, 2) * 100
+o  = [a1, a2, a3]
+padded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='pre', truncating='pre', padding_value=np.nan)
+test_eq(padded_o.shape, (3, 2, 4))
+padded_o
+
+
array([[[  1.,   2.,   3.,   4.],
+        [  6.,   7.,   8.,   9.]],
+
+       [[ nan,   0.,  10.,  20.],
+        [ nan,  30.,  40.,  50.]],
+
+       [[ nan,  nan,   0., 100.],
+        [ nan,  nan, 200., 300.]]])
+
+
+
+
# 3 dim
+a1 = np.arange(10).reshape(1, 2, 5)
+a2 = np.arange(6).reshape(1, 2, 3) * 10
+a3 = np.arange(4).reshape(1, 2, 2) * 100
+o  = [a1, a2, a3]
+padded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='post', truncating='pre', padding_value=np.nan)
+test_eq(padded_o.shape, (3, 2, 4))
+padded_o
+
+
array([[[  1.,   2.,   3.,   4.],
+        [  6.,   7.,   8.,   9.]],
+
+       [[  0.,  10.,  20.,  nan],
+        [ 30.,  40.,  50.,  nan]],
+
+       [[  0., 100.,  nan,  nan],
+        [200., 300.,  nan,  nan]]])
+
+
+
+
# 3 dim
+a1 = np.arange(10).reshape(1, 2, 5)
+a2 = np.arange(6).reshape(1, 2, 3) * 10
+a3 = np.arange(4).reshape(1, 2, 2) * 100
+o  = [a1, a2, a3]
+padded_o = pad_sequences(o, maxlen=4, dtype=np.float64, padding='post', truncating='post', padding_value=np.nan)
+test_eq(padded_o.shape, (3, 2, 4))
+padded_o
+
+
array([[[  0.,   1.,   2.,   3.],
+        [  5.,   6.,   7.,   8.]],
+
+       [[  0.,  10.,  20.,  nan],
+        [ 30.,  40.,  50.,  nan]],
+
+       [[  0., 100.,  nan,  nan],
+        [200., 300.,  nan,  nan]]])
+
+
+
+
# iterable is a list of lists
+a1 = np.arange(12).reshape(1, 2, 6).tolist()
+a2 = (np.arange(6).reshape(1, 2, 3) * 10).tolist()
+a3 = (np.arange(4).reshape(1, 2, 2) * 100).tolist()
+o  = [a1, a2, a3]
+padded_o = pad_sequences(o, maxlen=None, dtype=np.float64, padding='post', truncating='pre', padding_value=np.nan)
+test_eq(padded_o.shape, (3, 2, 6))
+padded_o
+
+
array([[[  0.,   1.,   2.,   3.,   4.,   5.],
+        [  6.,   7.,   8.,   9.,  10.,  11.]],
+
+       [[  0.,  10.,  20.,  nan,  nan,  nan],
+        [ 30.,  40.,  50.,  nan,  nan,  nan]],
+
+       [[  0., 100.,  nan,  nan,  nan,  nan],
+        [200., 300.,  nan,  nan,  nan,  nan]]])
+
+
+
+

source

+
+
+

match_seq_len

+
+
 match_seq_len (*arrays)
+
+
+
a = np.random.rand(10, 5, 8)
+b = np.random.rand(3, 5, 10)
+c, d = match_seq_len(a, b)
+test_eq(c.shape[-1], d.shape[-1])
+
+
+

source

+
+
+

random_shuffle

+
+
 random_shuffle (o, random_state=None)
+
+
+
a = np.arange(10)
+test_eq_type(random_shuffle(a, 1), np.array([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))
+t = torch.arange(10)
+test_eq_type(random_shuffle(t, 1), tensor([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))
+l = list(a)
+test_eq(random_shuffle(l, 1), [2, 9, 6, 4, 0, 3, 1, 7, 8, 5])
+l2 = L(l)
+test_eq_type(random_shuffle(l2, 1), L([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]))
+
+
+

source

+
+
+

cat2int

+
+
 cat2int (o)
+
+
+
a = np.array(['b', 'a', 'a', 'b', 'a', 'b', 'a'])
+test_eq_type(cat2int(a), TensorCategory([1, 0, 0, 1, 0, 1, 0]))
+
+
+
TensorBase([1,2,3])
+
+
TensorBase([1, 2, 3])
+
+
+
+

source

+
+
+

cycle_dl_estimate

+
+
 cycle_dl_estimate (dl, iters=10)
+
+
+

source

+
+
+

cycle_dl_to_device

+
+
 cycle_dl_to_device (dl, show_progress_bar=True)
+
+
+

source

+
+
+

cycle_dl

+
+
 cycle_dl (dl, show_progress_bar=True)
+
+
+

source

+
+
+

cache_data

+
+
 cache_data (o, slice_len=10000, verbose=False)
+
+
+

source

+
+
+

get_func_defaults

+
+
 get_func_defaults (f)
+
+
+

source

+
+
+

get_idx_from_df_col_vals

+
+
 get_idx_from_df_col_vals (df, col, val_list)
+
+
+
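A minimal sketch of get_idx_from_df_col_vals, assuming it returns the indices of the rows whose col value is in val_list (the expected result below is therefore an assumption):

df = pd.DataFrame({'sample_id': [0, 1, 2, 3, 4], 'target': ['a', 'b', 'a', 'c', 'b']})
idxs = get_idx_from_df_col_vals(df, 'target', ['a', 'c'])
print(idxs)  # expected: indices 0, 2 and 3
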

source

+
+
+

get_sublist_idxs

+
+
 get_sublist_idxs (aList, bList)
+
+

Get idxs that when applied to aList will return bList. aList must contain all values in bList

+
+
x = np.array([3, 5, 7, 1, 9, 8, 6, 2])
+y = np.array([6, 1, 5, 7])
+idx = get_sublist_idxs(x, y)
+test_eq(x[idx], y)
+x = np.array([3, 5, 7, 1, 9, 8, 6, 6, 2])
+y = np.array([6, 1, 5, 7, 5])
+idx = get_sublist_idxs(x, y)
+test_eq(x[idx], y)
+
+
+

source

+
+
+

flatten_list

+
+
 flatten_list (l)
+
+
+
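A minimal sketch of flatten_list, assuming it flattens a list of lists into a single list:

l = [[1, 2], [3, 4, 5], [6]]
test_eq(flatten_list(l), [1, 2, 3, 4, 5, 6])  # assumption: one flat list is returned
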

source

+
+
+

display_pd_df

+
+
 display_pd_df (df, max_rows:Union[bool,int]=False,
+                max_columns:Union[bool,int]=False)
+
+
+
old_max_rows, old_max_columns = pd.get_option('display.max_rows'), pd.get_option('display.max_columns')
+df = pd.DataFrame(np.random.rand(70, 25))
+display_pd_df(df, max_rows=2, max_columns=3)
+test_eq(old_max_rows, pd.get_option('display.max_rows'))
+test_eq(old_max_columns, pd.get_option('display.max_columns'))
+
+
(truncated DataFrame display: 70 rows × 25 columns, shown with at most 2 rows and 3 columns)

+
+
+
+
+

source

+
+
+

tscore

+
+
 tscore (o)
+
+
+

source

+
+
+

kstest

+
+
 kstest (data1, data2, alternative='two-sided', mode='auto', by_axis=None)
+
+

Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.

+

Parameters:
- data1, data2: two arrays of sample observations assumed to be drawn from a continuous distribution. Sample sizes can be different.
- alternative: {'two-sided', 'less', 'greater'}, optional. Defines the null and alternative hypotheses. Default is 'two-sided'.
- mode: {'auto', 'exact', 'asymp'}, optional. Defines the method used for calculating the p-value.
- by_axis (optional, int): for arrays with more than 1 dimension, the test will be run for each variable in that axis if by_axis is not None.

+
+

source

+
+
+

ttest

+
+
 ttest (data1, data2, equal_var=False)
+
+

Calculates t-statistic and p-value based on 2 sample distributions

+
+
a = np.random.normal(0.5, 1, 100)
+b = np.random.normal(0.15, .5, 50)
+plt.hist(a, 50)
+plt.hist(b, 50)
+plt.show()
+ttest(a,b)
+
+
+
+

+
+
+
+
+
+
a = np.random.normal(0.5, 1, (100,3))
+b = np.random.normal(0.5, 1, (50,))
+kstest(a,b)
+
+
(0.22333333333333333, 0.02452803315700394)
+
+
+
+
a = np.random.normal(0.5, 1, (100,3))
+b = np.random.normal(0.15, .5, (50,))
+kstest(a,b)
+
+
(0.31, 0.0004061333917852463)
+
+
+
+
data1 = np.random.normal(0,1,(100, 5, 3))
+data2 = np.random.normal(0,2,(100, 5, 3))
+kstest(data1, data2, by_axis=1)
+
+
([0.22,
+  0.16333333333333333,
+  0.16333333333333333,
+  0.18666666666666668,
+  0.21666666666666667],
+ [8.994053173844458e-07,
+  0.0006538374533623971,
+  0.0006538374533623971,
+  5.522790313356146e-05,
+  1.4007759411179028e-06])
+
+
+
+
a = np.random.normal(0.5, 1, 100)
+t = torch.normal(0.5, 1, (100, ))
+tscore(a), tscore(t)
+
+
(4.33309224863388, tensor(5.7798))
+
+
+
+

source

+
+
+

scc

+
+
 scc (a, b)
+
+
+

source

+
+
+

pcc

+
+
 pcc (a, b)
+
+
+
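pcc and scc presumably return the Pearson and Spearman correlation coefficients between a and b. A hedged sketch (the expected values are assumptions):

a = np.random.rand(100)
b = a * 2 + np.random.rand(100) * .1  # strongly and positively correlated with a
print(pcc(a, b), scc(a, b))  # both expected to be close to 1
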

source

+
+
+

remove_fn

+
+
 remove_fn (fn, verbose=False)
+
+

Removes a file (fn) if it exists

+
+

source

+
+
+

npsave

+
+
 npsave (array_fn, array, verbose=True)
+
+
+
fn = 'data/remove_fn_test.npy'
+a = np.zeros(1)
+npsave(fn, a)
+del a
+np.load(fn, mmap_mode='r+')
+remove_fn(fn, True)
+remove_fn(fn, True)
+
+
data/remove_fn_test.npy does not exist
+saving data/remove_fn_test.npy...
+...data/remove_fn_test.npy saved
+data/remove_fn_test.npy file removed
+data/remove_fn_test.npy does not exist
+
+
+
+

source

+
+
+

permute_2D

+
+
 permute_2D (array, axis=None)
+
+

Permute rows or columns in an array. This can be used, for example, in feature permutation

+
+
s = np.arange(100 * 50).reshape(100, 50)
+test_eq(permute_2D(s, axis=0).mean(0), s.mean(0))
+test_ne(permute_2D(s, axis=0), s)
+test_eq(permute_2D(s, axis=1).mean(1), s.mean(1))
+test_ne(permute_2D(s, axis=1), s)
+test_ne(permute_2D(s), s)
+
+
+

source

+
+
+

random_half_normal_tensor

+
+
 random_half_normal_tensor (shape=1, device=None)
+
+

Returns a tensor of a predefined shape between 0 and 1 with a half-normal distribution

+
+

source

+
+
+

random_normal_tensor

+
+
 random_normal_tensor (shape=1, device=None)
+
+

Returns a tensor of a predefined shape between -1 and 1 with a normal distribution

+
+

source

+
+
+

random_half_normal

+
+
 random_half_normal ()
+
+

Returns a number between 0 and 1 with a half-normal distribution

+
+

source

+
+
+

random_normal

+
+
 random_normal ()
+
+

Returns a number between -1 and 1 with a normal distribution

+
+
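A quick check of the scalar helpers, based only on the ranges stated in the descriptions above:

r = random_normal()
assert -1 <= r <= 1   # stated range: between -1 and 1
h = random_half_normal()
assert 0 <= h <= 1    # stated range: between 0 and 1
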

source

+
+
+

fig2buf

+
+
 fig2buf (fig)
+
+
+

source

+
+
+

get_plot_fig

+
+
 get_plot_fig (size=None, dpi=100)
+
+
+

source

+
+
+

default_dpi

+
+
 default_dpi ()
+
+
+
default_dpi()
+
+
100
+
+
+
+

source

+
+
+

plot_scatter

+
+
 plot_scatter (x, y, deg=1)
+
+
+
a = np.random.rand(100)
+b = np.random.rand(100)**2
+plot_scatter(a, b)
+
+
+
+

+
+
+
+
+
+

source

+
+
+

get_idxs

+
+
 get_idxs (o, aList)
+
+
+
a = random_shuffle(np.arange(100, 200))
+b = np.random.choice(a, 10, False)
+idxs = get_idxs(a, b)
+test_eq(a[idxs], b)
+
+
+

source

+
+
+

apply_cmap

+
+
 apply_cmap (o, cmap)
+
+
+
a = np.random.rand(16, 1, 40, 50)
+s = L(a.shape)
+s[1] = 3
+test_eq(L(apply_cmap(a, 'viridis').shape), s)
+
+s[0] = 1
+a = np.random.rand(1, 40, 50)
+test_eq(L(apply_cmap(a, 'viridis').shape), s)
+
+
+

source

+
+
+

torch_tile

+
+
 torch_tile (a, n_tile, dim=0)
+
+
+
test_eq(torch_tile(torch.arange(2), 3), tensor([0, 1, 0, 1, 0, 1]))
+
+
+

source

+
+
+

to_tsfresh_df

+
+
 to_tsfresh_df (ts)
+
+

Prepares a time series (Tensor/np.ndarray) to be used as a tsfresh dataset to allow feature extraction

+
+
ts = torch.rand(16, 3, 20)
+a = to_tsfresh_df(ts)
+ts = ts.numpy()
+b = to_tsfresh_df(ts)
+
+
+

source

+
+
+

scorr

+
+
 scorr (a, b)
+
+
+

source

+
+
+

pcorr

+
+
 pcorr (a, b)
+
+
+

source

+
+
+

torch_diff

+
+
 torch_diff (t, lag=1, pad=True, append=0)
+
+
+
t = torch.arange(24).reshape(2,3,4)
+test_eq(torch_diff(t, 1)[..., 1:].float().mean(), 1.)
+test_eq(torch_diff(t, 2)[..., 2:].float().mean(), 2.)
+
+
+

source

+
+
+

torch_clamp

+
+
 torch_clamp (o, min=None, max=None)
+
+

Clamp torch.Tensor using 1 or multiple dimensions

+
+
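A minimal sketch of torch_clamp with scalar bounds, where it is assumed to behave like torch.clamp:

t = torch.randn(2, 3, 4)
tc = torch_clamp(t, min=-0.5, max=0.5)
assert tc.min() >= -0.5 and tc.max() <= 0.5  # all values clamped to the given range
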

source

+
+
+

get_percentile

+
+
 get_percentile (o, percentile, axis=None)
+
+
+

source

+
+
+

clip_outliers

+
+
 clip_outliers (o, axis=None)
+
+
+

source

+
+
+

get_outliers_IQR

+
+
 get_outliers_IQR (o, axis=None, quantile_range=(25.0, 75.0))
+
+
+
t = torch.randn(2,3,100)
+test_eq(type(get_outliers_IQR(t, -1)[0]), torch.Tensor)
+a = t.numpy()
+test_eq(type(get_outliers_IQR(a, -1)[0]), np.ndarray)
+test_close(get_percentile(t, 25).numpy(), get_percentile(a, 25))
+
+
+

source

+
+
+

get_robustscale_params

+
+
 get_robustscale_params (o, sel_vars=None, not_sel_vars=None, by_var=True,
+                         percentiles=(25, 75), eps=1e-06)
+
+

Calculates the median and interquartile range required to robust-scale the inputs

+
+
a = np.random.rand(16, 3, 100)
+a[a>.8] = np.nan
+median, IQR = get_robustscale_params(a, by_var=True, percentiles=(25, 75))
+a_scaled = (a - median) / IQR
+test_eq(a.shape, a_scaled.shape)
+test_eq(np.isnan(median).sum(),0)
+test_eq(np.isnan(IQR).sum(),0)
+test_eq(np.isnan(a), np.isnan(a_scaled))
+
+
+

source

+
+
+

torch_slice_by_dim

+
+
 torch_slice_by_dim (t, index, dim=-1, **kwargs)
+
+
+
t = torch.rand(5, 3)
+index = torch.randint(0, 3, (5, 1))
+# index = [[0, 2], [0, 1], [1, 2], [0, 2], [0, 1]]
+torch_slice_by_dim(t, index)
+
+
tensor([[0.5341],
+        [0.4543],
+        [0.0942],
+        [0.9645],
+        [0.0405]])
+
+
+
+

source

+
+
+

torch_nanstd

+
+
 torch_nanstd (o, dim=None, keepdim=False)
+
+

There’s currently no torch.nanstd function

+
+

source

+
+
+

torch_nanmean

+
+
 torch_nanmean (o, dim=None, keepdim=False)
+
+

There’s currently no torch.nanmean function

+
+
t = torch.rand(1000)
+t[:100] = float('nan')
+assert torch_nanmean(t).item() > 0
+
+
+

source

+
+
+

concat

+
+
 concat (*ls, dim=0)
+
+

Concatenate tensors, arrays, lists, or tuples by a dimension

+
+
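A minimal sketch of concat, assuming inputs of the same kind are concatenated along dim:

test_eq(concat(np.array([1, 2]), np.array([3, 4])), np.array([1, 2, 3, 4]))
test_eq(concat(tensor([1, 2]), tensor([3, 4])), tensor([1, 2, 3, 4]))
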

source

+
+
+

reduce_memory_usage

+
+
 reduce_memory_usage (df)
+
+
+

source

+
+
+

cls_name

+
+
 cls_name (o)
+
+
+
test_eq(cls_name(timer), 'Timer')
+
+
+

source

+
+
+

rotate_axis2

+
+
 rotate_axis2 (o, steps=1)
+
+
+

source

+
+
+

rotate_axis1

+
+
 rotate_axis1 (o, steps=1)
+
+
+

source

+
+
+

rotate_axis0

+
+
 rotate_axis0 (o, steps=1)
+
+
+

source

+
+
+

random_roll3d

+
+
 random_roll3d (o, axis=(), replace=False)
+
+

Randomly rolls a 3D object along the indicated axes. This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently

+
+

source

+
+
+

random_roll2d

+
+
 random_roll2d (o, axis=(), replace=False)
+
+

Randomly rolls a 2D object along the indicated axis. This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently

+
+

source

+
+
+

roll3d

+
+
 roll3d (o, roll1:Union[NoneType,list,int]=None,
+         roll2:Union[NoneType,list,int]=None,
+         roll3:Union[NoneType,list,int]=None)
+
+

Rolls a 3D object on the indicated axis. This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently

+
+

source

+
+
+

roll2d

+
+
 roll2d (o, roll1:Union[NoneType,list,int]=None,
+         roll2:Union[NoneType,list,int]=None)
+
+

Rolls a 2D object on the indicated axis. This solution is based on https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently

+
+
a = np.tile(np.arange(10), 3).reshape(3, 10) * np.array([1, 10, 100]).reshape(-1, 1)
+a
+
+
array([[  0,   1,   2,   3,   4,   5,   6,   7,   8,   9],
+       [  0,  10,  20,  30,  40,  50,  60,  70,  80,  90],
+       [  0, 100, 200, 300, 400, 500, 600, 700, 800, 900]])
+
+
+
+
roll2d(a, roll1=[2, 1, 0])
+
+
array([[  0, 100, 200, 300, 400, 500, 600, 700, 800, 900],
+       [  0,  10,  20,  30,  40,  50,  60,  70,  80,  90],
+       [  0,   1,   2,   3,   4,   5,   6,   7,   8,   9]])
+
+
+
+
roll2d(a, roll2=3)
+
+
array([[  7,   8,   9,   0,   1,   2,   3,   4,   5,   6],
+       [ 70,  80,  90,   0,  10,  20,  30,  40,  50,  60],
+       [700, 800, 900,   0, 100, 200, 300, 400, 500, 600]])
+
+
+
+
o = torch.arange(24).reshape(2,3,4)
+test_eq(rotate_axis0(o)[1], o[0])
+test_eq(rotate_axis1(o)[:,1], o[:,0])
+test_eq(rotate_axis2(o)[...,1], o[...,0])
+
+
+

source

+
+
+

chunks_calculator

+
+
 chunks_calculator (shape, dtype='float32', n_bytes=1073741824)
+
+

Function to calculate chunks for a given size of n_bytes (default = 1024**3 == 1GB). It guarantees > 50% of the chunk will be filled

+
+
shape = (1_000, 10, 1000)
+dtype = 'float32'
+test_eq(chunks_calculator(shape, dtype), False)
+
+shape = (54684, 10, 1000)
+dtype = 'float32'
+test_eq(chunks_calculator(shape, dtype), (27342, -1, -1))
+
+
+

source

+
+
+

is_memory_shared

+
+
 is_memory_shared (a, b)
+
+

Check if 2 array-like objects share memory

+
+
a = np.random.rand(2,3,4)
+t1 = torch.from_numpy(a)
+test_eq(is_memory_shared(a, t1), True)
+a = np.random.rand(2,3,4)
+t2 = torch.as_tensor(a)
+test_eq(is_memory_shared(a, t2), True)
+a = np.random.rand(2,3,4)
+t3 = torch.tensor(a)
+test_eq(is_memory_shared(a, t3), False)
+
+
+

source

+
+
+

assign_in_chunks

+
+
 assign_in_chunks (a, b, chunksize='auto', inplace=True, verbose=True)
+
+

Assigns values in b to an array-like object a using chunks to avoid memory overload. The resulting a retains its dtype and shares its memory.
- a: array-like object.
- b: may be an integer, float, str, 'rand' (for random data), or another array-like object.
- chunksize: the size of the chunks. If 'auto', chunks will have around 1GB each.

+
+
a = np.random.rand(10,3,4).astype('float32')
+a_dtype = a.dtype
+a_id = id(a)
+b = np.random.rand(10,3,4).astype('float64')
+assign_in_chunks(a, b, chunksize=2, inplace=True, verbose=True)
+test_close(a, b)
+test_eq(a.dtype, a_dtype)
+test_eq(id(a), a_id)
+
+a = np.random.rand(10,3,4).astype('float32')
+a_dtype = a.dtype
+a_id = id(a)
+b = 1
+assign_in_chunks(a, b, chunksize=2, inplace=True, verbose=True)
+test_eq(a, np.ones_like(a).astype(a.dtype))
+test_eq(a.dtype, a_dtype)
+test_eq(id(a), a_id)
+
+a = np.random.rand(10,3,4).astype('float32')
+a_dtype = a.dtype
+a_id = id(a)
+b = 0.5
+assign_in_chunks(a, b, chunksize=2, inplace=True, verbose=True)
+test_eq(a.dtype, a_dtype)
+test_eq(id(a), a_id)
+
+a = np.random.rand(10,3,4).astype('float32')
+a_dtype = a.dtype
+a_id = id(a)
+b = 'rand'
+assign_in_chunks(a, b, chunksize=2, inplace=True, verbose=True)
+test_eq(a.dtype, a_dtype)
+test_eq(id(a), a_id)
+
+
+
a = np.random.rand(10,3,4).astype('float32')
+b = np.random.rand(10,3,4).astype('float64')
+c = assign_in_chunks(a, b, chunksize=2, inplace=False, verbose=True)
+test_close(c, b)
+test_eq(a.dtype, c.dtype)
+test_eq(is_memory_shared(a, c), True)
+
+a = np.random.rand(10,3,4).astype('float32')
+b = 1
+c = assign_in_chunks(a, b, chunksize=2, inplace=False, verbose=True)
+test_eq(a, np.ones_like(a).astype(a.dtype))
+test_eq(a.dtype, c.dtype)
+test_eq(is_memory_shared(a, c), True)
+
+a = np.random.rand(10,3,4).astype('float32')
+b = 0.5
+c = assign_in_chunks(a, b, chunksize=2, inplace=False, verbose=True)
+test_eq(a.dtype, c.dtype)
+test_eq(is_memory_shared(a, c), True)
+
+a = np.random.rand(10,3,4).astype('float32')
+b = 'rand'
+c = assign_in_chunks(a, b, chunksize=2, inplace=False, verbose=True)
+test_eq(a.dtype, c.dtype)
+test_eq(is_memory_shared(a, c), True)
+
+
+

source

+
+
+

create_array

+
+
 create_array (shape, fname=None, path='./data', on_disk=True,
+               dtype='float32', mode='r+', fill_value='rand',
+               chunksize='auto', verbose=True, **kwargs)
+
+

mode:
- 'r': Open existing file for reading only.
- 'r+': Open existing file for reading and writing.
- 'w+': Create or overwrite existing file for reading and writing.
- 'c': Copy-on-write: assignments affect data in memory, but changes are not saved to disk. The file on disk is read-only.

fill_value: 'rand' (for random numbers), int or float.
chunksize: 'auto' to calculate chunks of 1GB, or any integer (for a given number of samples).

+
+
fname = 'X_on_disk'
+shape = (100, 10, 10)
+X = create_array(shape, fname, on_disk=True, mode='r+')
+test_ne(abs(X).sum(), 0)
+os.remove(X.filename)
+del X
+
+
auto chunksize: 100
+
+
+
+
fname = 'X_on_disk'
+shape = (100, 10, 10)
+X = create_empty_array(shape, fname, on_disk=True, mode='r+')
+test_eq(abs(X).sum(), 0)
+
+chunksize = 10
+pbar = progress_bar(range(math.ceil(len(X) / chunksize)), leave=False)
+start = 0
+for i in pbar:
+    end = min(start + chunksize, len(X))
+    partial_data = np.random.rand(end - start, X.shape[1] , X.shape[2])
+    X[start:end] = partial_data
+    start = end
+    del partial_data
+    gc.collect()
+filename = X.filename
+del X
+X = np.load(filename, mmap_mode='r+')
+test_eq((X == 0).sum(), 0)
+test_eq(X.shape, shape)
+os.remove(X.filename)
+del X
+
+
+

source

+
+
+

np_load_compressed

+
+
 np_load_compressed (fname=None, path='./data', **kwargs)
+
+
+

source

+
+
+

np_save_compressed

+
+
 np_save_compressed (arr, fname=None, path='./data', verbose=False,
+                     **kwargs)
+
+
+
X1 = np.random.rand(10)
+np_save_compressed(X1, 'X_comp', path='./data')
+X2 = np_load_compressed('X_comp')
+test_eq(X1, X2)
+
+
+

source

+
+
+

np2memmap

+
+
 np2memmap (arr, fname=None, path='./data', dtype='float32', mode='c',
+            **kwargs)
+
+

Function that turns an ndarray into a memmap ndarray.
mode:
- 'r': Open existing file for reading only.
- 'r+': Open existing file for reading and writing.
- 'w+': Create or overwrite existing file for reading and writing.
- 'c': Copy-on-write: assignments affect data in memory, but changes are not saved to disk. The file on disk is read-only.

+
+
X1 = np.random.rand(10)
+X2 = np2memmap(X1, 'X1_test')
+test_eq(X1, X2)
+test_ne(type(X1), type(X2))
+
+
+

source

+
+
+

torch_mean_groupby

+
+
 torch_mean_groupby (o, idxs)
+
+

Computes the torch mean along axis 0, grouped by the idxs. You need to ensure that idxs are in the same order as o.

+
+
o = torch.arange(6*2*3).reshape(6, 2, 3).float()
+idxs = np.array([[0,1,2,3], [2,3]], dtype=object)
+output = torch_mean_groupby(o, idxs)
+test_eq(o[:2], output[:2])
+test_eq(o[2:4].mean(0), output[2])
+test_eq(o[4:6].mean(0), output[3])
+
+
+

source

+
+
+

torch_flip

+
+
 torch_flip (t, dims=-1)
+
+
+
t = torch.randn(2, 3, 4)
+test_eq(torch.flip(t, (2,)), torch_flip(t, dims=-1))
+
+
+

source

+
+
+

torch_masked_to_num

+
+
 torch_masked_to_num (o, mask, num=0, inplace=False)
+
+
+

source

+
+
+

torch_nan_to_num

+
+
 torch_nan_to_num (o, num=0, inplace=False)
+
+
+
x = torch.rand(2, 4, 6)
+x[:, :3][x[:, :3] < .5] = np.nan
+nan_values = torch.isnan(x).sum()
+y = torch_nan_to_num(x[:, :3], inplace=False)
+test_eq(torch.isnan(y).sum(), 0)
+test_eq(torch.isnan(x).sum(), nan_values)
+torch_nan_to_num(x[:, :3], inplace=True)
+test_eq(torch.isnan(x).sum(), 0)
+
+
+
x = torch.rand(2, 4, 6)
+mask = x[:, :3] > .5
+x[:, :3] = torch_masked_to_num(x[:, :3], mask, num=0, inplace=False)
+test_eq(x[:, :3][mask].sum(), 0)
+
+
+
x = torch.rand(2, 4, 6)
+mask = x[:, :3] > .5
+torch_masked_to_num(x[:, :3], mask, num=0, inplace=True)
+test_eq(x[:, :3][mask].sum(), 0)
+
+
+

source

+
+
+

mpl_trend

+
+
 mpl_trend (x, y, deg=1)
+
+
+
x = np.sort(np.random.randint(0, 100, 100)/10)
+y = np.random.rand(100) + np.linspace(0, 10, 100)
+trend = mpl_trend(x, y)
+plt.scatter(x, y)
+plt.plot(x, trend, 'r')
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

array2digits

+
+
 array2digits (o, n_digits=None, normalize=True)
+
+
+

source

+
+
+

int2digits

+
+
 int2digits (o, n_digits=None, normalize=True)
+
+
+
o = -9645
+test_eq(int2digits(o, 6), np.array([ 0,  0, -.9, -.6, -.4, -.5]))
+
+a = np.random.randint(-1000, 1000, 10)
+test_eq(array2digits(a,5).shape, (10,5))
+
+
+

source

+
+
+

sincos_encoding

+
+
 sincos_encoding (seq_len, device=None, to_np=False)
+
+
+
sin, cos = sincos_encoding(100)
+plt.plot(sin.cpu().numpy())
+plt.plot(cos.cpu().numpy())
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

linear_encoding

+
+
 linear_encoding (seq_len, device=None, to_np=False, lin_range=(-1, 1))
+
+
+
lin = linear_encoding(100)
+plt.plot(lin.cpu().numpy())
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

encode_positions

+
+
 encode_positions (pos_arr, min_val=None, max_val=None, linear=False,
+                   lin_range=(-1, 1))
+
+

Encodes an array with positions using linear or sincos methods

+
+
n_samples = 10
+length = 500
+_a = []
+for i in range(n_samples):
+    a = np.arange(-4000, 4000, 10)
+    mask = np.random.rand(len(a)) > .5
+    a = a[mask]
+    a = np.concatenate([a, np.array([np.nan] * (length - len(a)))])
+    _a.append(a.reshape(-1,1))
+a = np.concatenate(_a, -1).transpose(1,0)
+sin, cos = encode_positions(a, linear=False)
+test_eq(a.shape, (n_samples, length))
+test_eq(sin.shape, (n_samples, length))
+test_eq(cos.shape, (n_samples, length))
+plt.plot(sin.T)
+plt.plot(cos.T)
+plt.xlim(0, 500)
+plt.show()
+
+
+
+

+
+
+
+
+
+
n_samples = 10
+length = 500
+_a = []
+for i in range(n_samples):
+    a = np.arange(-4000, 4000, 10)
+    mask = np.random.rand(len(a)) > .5
+    a = a[mask]
+    a = np.concatenate([a, np.array([np.nan] * (length - len(a)))])
+    _a.append(a.reshape(-1,1))
+a = np.concatenate(_a, -1).transpose(1,0)
+lin = encode_positions(a, linear=True)
+test_eq(a.shape, (n_samples, length))
+test_eq(lin.shape, (n_samples, length))
+plt.plot(lin.T)
+plt.xlim(0, 500)
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

sort_generator

+
+
 sort_generator (generator, bs)
+
+
+
generator = (i for i in np.random.permutation(np.arange(1000000)).tolist())
+l = list(sort_generator(generator, 512))
+test_eq(l[:512], sorted(l[:512]))
+
+
+

source

+
+
+

get_subset_dict

+
+
 get_subset_dict (d, keys)
+
+
+
keys = string.ascii_lowercase
+values = np.arange(len(keys))
+d = {k:v for k,v in zip(keys,values)}
+test_eq(get_subset_dict(d, ['a', 'k', 'j', 'e']), {'a': 0, 'k': 10, 'j': 9, 'e': 4})
+
+
+

source

+
+
+

remove_dir

+
+
 remove_dir (directory, verbose=True)
+
+
+

source

+
+
+

create_dir

+
+
 create_dir (directory, verbose=True)
+
+
+
path = "wandb3/wandb2/wandb"
+create_dir(path)
+assert Path(path).exists()
+
+paths = ["wandb3/wandb2/wandb", "wandb3/wandb2", "wandb"]
+remove_dir(paths)
+for p in paths:
+    assert not Path(p).exists()
+
+path = "wandb3"
+assert Path(path).exists()
+remove_dir(path)
+assert not Path(path).exists()
+
+
wandb3/wandb2/wandb directory created.
+wandb3/wandb2/wandb directory removed.
+wandb3/wandb2 directory removed.
+wandb directory doesn't exist.
+wandb3 directory removed.
+
+
+
+
create_dir('./test')
+
+
test directory created.
+
+
+
+
# note: this cell is written to ./test/mod_dev.py via a notebook cell magic (see the "Writing ./test/mod_dev.py" output below)
a = 5
def fn(b): return a + b
+
+
Writing ./test/mod_dev.py
+
+
+
+
fname = "./test/mod_dev.py"
+while True:
+    if fname[0] in "/ .": fname = fname.split(fname[0], 1)[1]
+    else: break
+if '/' in fname and fname.rsplit('/', 1)[0] not in sys.path: sys.path.append(fname.rsplit('/', 1)[0])
+mod = import_file_as_module(fname)
+test_eq(mod.fn(3), 8)
+sys.path = sys.path[:-1]
+remove_dir('./test/')
+
+
test directory removed.
+
+
+
+

source

+
+
+

named_partial

+
+
 named_partial (name, func, *args, **kwargs)
+
+

Create a partial function with a name

+
+
def add_1(x, add=1): return x+add
+test_eq(add_1(1), 2)
+add_2 = partial(add_1, add=2)
+test_eq(add_2(2), 4)
+test_ne(str(add_2), "add_2")
+add_2 = named_partial('add_2', add_1, add=2)
+test_eq(add_2(2), 4)
+test_eq(str(add_2), "add_2")
+
+class _A():
+    def __init__(self, add=1): self.add = add
+    def __call__(self, x): return x + self.add
+
+test_eq(_A()(1), 2)
+_A2 = partial(_A, add=2)
+test_eq(_A2()(1), 3)
+test_ne(str(_A2), '_A2')
+_A2 = named_partial('_A2', _A, add=2)
+test_eq(_A2()(1), 3)
+test_eq(str(_A2), '_A2')
+
+
+

source

+
+
+

dict2attrdict

+
+
 dict2attrdict (d:dict)
+
+

Converts a (nested) dict to an AttrDict.

|   | Type | Details |
|---|------|---------|
| d | dict | a dict  |
+
+

source

+
+
+

attrdict2dict

+
+
 attrdict2dict (d:dict)
+
+

Converts a (nested) AttrDict to a dict.

|   | Type | Details |
|---|------|---------|
| d | dict | a dict  |
+
+
# Test attrdict2dict
+d = AttrDict({'a': 1, 'b': AttrDict({'c': 2, 'd': 3})})
+test_eq(attrdict2dict(d), {'a': 1, 'b': {'c': 2, 'd': 3}})
+# Test dict2attrdict
+d = {'a': 1, 'b': {'c': 2, 'd': 3}}
+test_eq(dict2attrdict(d), AttrDict({'a': 1, 'b': AttrDict({'c': 2, 'd': 3})}))
+
+
+

source

+
+
+

get_config

+
+
 get_config (file_path)
+
+

Gets a config from a yaml file.

+
+

source

+
+
+

yaml2dict

+
+
 yaml2dict (file_path, attrdict=True)
+
+

Converts a yaml file to a dict (optionally AttrDict).

|           | Type | Default | Details |
|-----------|------|---------|---------|
| file_path |      |         | a path to a yaml file |
| attrdict  | bool | True    | if True, convert output to AttrDict |
+
+

source

+
+
+

dict2yaml

+
+
 dict2yaml (d, file_path, sort_keys=False)
+
+

Converts a dict to a yaml file.

|           | Type | Default | Details |
|-----------|------|---------|---------|
| d         |      |         | a dict |
| file_path |      |         | a path to a yaml file |
| sort_keys | bool | False   | if True, sort the keys |
+
+
program: wandb_scripts/train_script.py          # (required) Path to training script.
+method: bayes                                   # (required) Specify the search strategy: grid, random or bayes
+parameters:                                     # (required) Specify parameters bounds to search.
+   bs:
+      values: [32, 64, 128]
+   depth:
+      values: [3, 6, 9, 12]
+   fc_dropout:
+      distribution: uniform
+      min: 0.
+      max: 0.5
+   lr_max:
+      values: [0.001, 0.003, 0.01, 0.03, 0.1]
+   n_epoch:
+      values: [10, 15, 20]
+   nb_filters:
+      values: [32, 64, 128]
+name: LSST_sweep_01
+metric:
+   name: accuracy                              # This must match one of the metrics in the training script
+   goal: maximize
+early_terminate:
+   type: hyperband
+   min_iter: 3
+project: LSST_wandb_hpo
+
+
Writing sweep_config.yaml
+
+
+
+
fname = "sweep_config.yaml"
+sweep_config = yaml2dict(fname)
+print(sweep_config)
+test_eq(sweep_config.method, 'bayes')
+test_eq(sweep_config['metric'], {'name': 'accuracy', 'goal': 'maximize'})
+os.remove(fname)
+
+
{'program': 'wandb_scripts/train_script.py', 'method': 'bayes', 'parameters': {'bs': {'values': [32, 64, 128]}, 'depth': {'values': [3, 6, 9, 12]}, 'fc_dropout': {'distribution': 'uniform', 'min': 0.0, 'max': 0.5}, 'lr_max': {'values': [0.001, 0.003, 0.01, 0.03, 0.1]}, 'n_epoch': {'values': [10, 15, 20]}, 'nb_filters': {'values': [32, 64, 128]}}, 'name': 'LSST_sweep_01', 'metric': {'name': 'accuracy', 'goal': 'maximize'}, 'early_terminate': {'type': 'hyperband', 'min_iter': 3}, 'project': 'LSST_wandb_hpo'}
+
+
+
+

source

+
+
+

get_cat_cols

+
+
 get_cat_cols (df)
+
+
+

source

+
+
+

get_cont_cols

+
+
 get_cont_cols (df)
+
+
+
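A minimal sketch, assuming get_cat_cols returns the names of the categorical (object/category) columns of a DataFrame and get_cont_cols the numerical ones:

df = pd.DataFrame({'a': [1., 2., 3.], 'b': ['x', 'y', 'z'], 'c': [10, 20, 30]})
print(get_cat_cols(df))   # expected: ['b'] (assumption)
print(get_cont_cols(df))  # expected: ['a', 'c'] (assumption)
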

source

+
+
+

str2index

+
+
 str2index (o)
+
+
+

source

+
+
+

str2list

+
+
 str2list (o)
+
+
+

source

+
+
+

map_array

+
+
 map_array (arr, dim=1)
+
+
+

source

+
+
+

get_mapping

+
+
 get_mapping (arr, dim=1, return_counts=False)
+
+
+
a = np.asarray(alphabet[np.random.randint(0,15,30)]).reshape(10,3)
+b = np.asarray(ALPHABET[np.random.randint(6,10,30)]).reshape(10,3)
+x = concat(a,b,dim=1)
+maps, counts = get_mapping(x, dim=1, return_counts=True)
+x, maps, counts
+
+
(array([['d', 'k', 'l', 'I', 'I', 'G'],
+        ['g', 'i', 'l', 'I', 'J', 'I'],
+        ['e', 'l', 'n', 'G', 'H', 'I'],
+        ['e', 'l', 'a', 'I', 'H', 'G'],
+        ['k', 'l', 'b', 'I', 'I', 'J'],
+        ['c', 'f', 'k', 'I', 'H', 'I'],
+        ['e', 'j', 'f', 'I', 'H', 'J'],
+        ['n', 'd', 'g', 'G', 'J', 'J'],
+        ['d', 'f', 'a', 'I', 'H', 'H'],
+        ['i', 'c', 'm', 'J', 'G', 'G']], dtype='<U1'),
+ [(#7) ['c','d','e','g','i','k','n'],
+  (#7) ['c','d','f','i','j','k','l'],
+  (#8) ['a','b','f','g','k','l','m','n'],
+  (#3) ['G','I','J'],
+  (#4) ['G','H','I','J'],
+  (#4) ['G','H','I','J']],
+ [7, 7, 8, 3, 4, 4])
+
+
+
+
x = np.asarray(alphabet[np.random.randint(0,15,30)]).reshape(10,3)
+x, map_array(x), map_array(x, 1)
+
+
(array([['i', 'm', 'd'],
+        ['h', 'm', 'g'],
+        ['i', 'g', 'd'],
+        ['k', 'm', 'n'],
+        ['n', 'j', 'l'],
+        ['n', 'l', 'i'],
+        ['f', 'c', 'k'],
+        ['i', 'm', 'a'],
+        ['l', 'i', 'f'],
+        ['k', 'o', 'g']], dtype='<U1'),
+ array([[2, 5, 1],
+        [1, 5, 3],
+        [2, 1, 1],
+        [3, 5, 7],
+        [5, 3, 6],
+        [5, 4, 4],
+        [0, 0, 5],
+        [2, 5, 0],
+        [4, 2, 2],
+        [3, 6, 3]]),
+ array([[2, 5, 1],
+        [1, 5, 3],
+        [2, 1, 1],
+        [3, 5, 7],
+        [5, 3, 6],
+        [5, 4, 4],
+        [0, 0, 5],
+        [2, 5, 0],
+        [4, 2, 2],
+        [3, 6, 3]]))
+
+
+
+

source

+
+
+

log_tfm

+
+
 log_tfm (o, inplace=False)
+
+

Log transforms an array-like object with positive and/or negative values

+
+
arr = np.asarray([-1000, -100, -10, -1, 0, 1, 10, 100, 1000]).astype(float)
+plt.plot(arr, log_tfm(arr, False))
+plt.show()
+
+
+
+

+
+
+
+
+
+
t = tensor([-1000, -100, -10, -1, 0, 1, 10, 100, 1000]).float()
+plt.plot(t, log_tfm(t, False))
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

to_sincos_time

+
+
 to_sincos_time (arr, max_value)
+
+
+
arr = np.sort(np.random.rand(100) * 5)
+arr_sin, arr_cos = to_sincos_time(arr, 5)
+plt.scatter(arr, arr_sin)
+plt.scatter(arr, arr_cos)
+plt.show()
+
+
+
+

+
+
+
+
+
+

source

+
+
+

plot_feature_dist

+
+
 plot_feature_dist (X, percentiles=[0, 0.1, 0.5, 1, 5, 10, 25, 50, 75, 90,
+                    95, 99, 99.5, 99.9, 100])
+
+
+
arr = np.random.rand(10, 3, 100)
+plot_feature_dist(arr, percentiles=[0,0.1,0.5,1,5,10,25,50,75,90,95,99,99.5,99.9,100])
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+

source

+
+
+

rolling_moving_average

+
+
 rolling_moving_average (o, window=2)
+
+
+
a = np.arange(60).reshape(2,3,10).astype(float)
+t = torch.arange(60).reshape(2,3,10).float()
+test_close(rolling_moving_average(a, window=3), rolling_moving_average(t, window=3).numpy())
+print(t)
+print(rolling_moving_average(t, window=3))
+
+
tensor([[[ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9.],
+         [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],
+         [20., 21., 22., 23., 24., 25., 26., 27., 28., 29.]],
+
+        [[30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],
+         [40., 41., 42., 43., 44., 45., 46., 47., 48., 49.],
+         [50., 51., 52., 53., 54., 55., 56., 57., 58., 59.]]])
+tensor([[[ 0.0000,  0.5000,  1.0000,  2.0000,  3.0000,  4.0000,  5.0000,
+           6.0000,  7.0000,  8.0000],
+         [10.0000, 10.5000, 11.0000, 12.0000, 13.0000, 14.0000, 15.0000,
+          16.0000, 17.0000, 18.0000],
+         [20.0000, 20.5000, 21.0000, 22.0000, 23.0000, 24.0000, 25.0000,
+          26.0000, 27.0000, 28.0000]],
+
+        [[30.0000, 30.5000, 31.0000, 32.0000, 33.0000, 34.0000, 35.0000,
+          36.0000, 37.0000, 38.0000],
+         [40.0000, 40.5000, 41.0000, 42.0000, 43.0000, 44.0000, 45.0000,
+          46.0000, 47.0000, 48.0000],
+         [50.0000, 50.5000, 51.0000, 52.0000, 53.0000, 54.0000, 55.0000,
+          56.0000, 57.0000, 58.0000]]])
+
+
+
+

source

+
+
+

fbfill_sequence

+
+
 fbfill_sequence (o)
+
+

Forward and backward fills an array-like object along the sequence dimension

+
+

source

+
+
+

bfill_sequence

+
+
 bfill_sequence (o)
+
+

Backward fills an array-like object along the sequence dimension

+
+

source

+
+
+

ffill_sequence

+
+
 ffill_sequence (o)
+
+

Forward fills an array-like object along the sequence dimension

+
+
a = np.arange(80).reshape(2, 4, 10).astype(float)
+mask = np.random.rand(*a.shape)
+a[mask > .8] = np.nan
+t = torch.from_numpy(a)
+t
+
+
tensor([[[ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8., nan],
+         [10., 11., nan, nan, 14., 15., nan, 17., nan, 19.],
+         [20., 21., 22., 23., nan, 25., 26., 27., 28., 29.],
+         [30., 31., 32., 33., nan, 35., 36., 37., 38., 39.]],
+
+        [[40., 41., 42., 43., 44., 45., 46., 47., nan, 49.],
+         [nan, 51., nan, 53., 54., 55., nan, 57., 58., 59.],
+         [60., 61., 62., 63., 64., nan, nan, 67., 68., 69.],
+         [70., nan, 72., 73., 74., 75., 76., nan, 78., 79.]]],
+       dtype=torch.float64)
+
+
+
+
# forward fill
+filled_a = ffill_sequence(a)
+print(filled_a)
+m = np.isnan(filled_a)
+test_eq(filled_a[~m], ffill_sequence(t).numpy()[~m])
+
+
[[[ 0.  1.  2.  3.  4.  5.  6.  7.  8.  8.]
+  [10. 11. 11. 11. 14. 15. 15. 17. 17. 19.]
+  [20. 21. 22. 23. 23. 25. 26. 27. 28. 29.]
+  [30. 31. 32. 33. 33. 35. 36. 37. 38. 39.]]
+
+ [[40. 41. 42. 43. 44. 45. 46. 47. 47. 49.]
+  [nan 51. 51. 53. 54. 55. 55. 57. 58. 59.]
+  [60. 61. 62. 63. 64. 64. 64. 67. 68. 69.]
+  [70. 70. 72. 73. 74. 75. 76. 76. 78. 79.]]]
+
+
+
+
# backward fill
+filled_a = bfill_sequence(a)
+print(filled_a)
+m = np.isnan(filled_a)
+test_eq(filled_a[~m], bfill_sequence(t).numpy()[~m])
+
+
[[[ 0.  1.  2.  3.  4.  5.  6.  7.  8. nan]
+  [10. 11. 14. 14. 14. 15. 17. 17. 19. 19.]
+  [20. 21. 22. 23. 25. 25. 26. 27. 28. 29.]
+  [30. 31. 32. 33. 35. 35. 36. 37. 38. 39.]]
+
+ [[40. 41. 42. 43. 44. 45. 46. 47. 49. 49.]
+  [51. 51. 53. 53. 54. 55. 57. 57. 58. 59.]
+  [60. 61. 62. 63. 64. 67. 67. 67. 68. 69.]
+  [70. 72. 72. 73. 74. 75. 76. 78. 78. 79.]]]
+
+
+
+
# forward & backward fill
+filled_a = fbfill_sequence(a)
+print(filled_a)
+m = np.isnan(filled_a)
+test_eq(filled_a[~m], fbfill_sequence(t).numpy()[~m])
+
+
[[[ 0.  1.  2.  3.  4.  5.  6.  7.  8.  8.]
+  [10. 11. 11. 11. 14. 15. 15. 17. 17. 19.]
+  [20. 21. 22. 23. 23. 25. 26. 27. 28. 29.]
+  [30. 31. 32. 33. 33. 35. 36. 37. 38. 39.]]
+
+ [[40. 41. 42. 43. 44. 45. 46. 47. 47. 49.]
+  [51. 51. 51. 53. 54. 55. 55. 57. 58. 59.]
+  [60. 61. 62. 63. 64. 64. 64. 67. 68. 69.]
+  [70. 70. 72. 73. 74. 75. 76. 76. 78. 79.]]]
+
+
+
+

source

+
+
+

dummify

+
+
 dummify (o:Union[numpy.ndarray,torch.Tensor], by_var:bool=True,
+          inplace:bool=False, skip:Optional[list]=None, random_state=None)
+
+

Shuffles an array-like object along all dimensions, or along dimension 1 (variables) if by_var is True.

+
+
arr = np.random.rand(2,3,10)
+arr_original = arr.copy()
+dummy_arr = dummify(arr)
+test_ne(arr_original, dummy_arr)
+test_eq(arr_original, arr)
+dummify(arr, inplace=True)
+test_ne(arr_original, arr)
+
+
+
t = torch.rand(2,3,10)
+t_original = t.clone()
+dummy_t = dummify(t)
+test_ne(t_original, dummy_t)
+test_eq(t_original, t)
+dummify(t, inplace=True)
+test_ne(t_original, t)
+
+
+

source

+
+
+

shuffle_along_axis

+
+
 shuffle_along_axis (o, axis=-1, random_state=None)
+
+
+
X = np.arange(60).reshape(2,3,10) + 10
+X_shuffled = shuffle_along_axis(X,(0, -1), random_state=23)
+test_eq(X_shuffled, np.array([[[13, 15, 41, 14, 40, 49, 18, 42, 47, 46],
+                               [28, 56, 53, 50, 52, 25, 24, 57, 51, 59],
+                               [34, 30, 38, 35, 69, 66, 63, 67, 61, 62]],
+
+                              [[19, 10, 11, 16, 43, 12, 17, 48, 45, 44],
+                               [23, 20, 26, 22, 21, 27, 58, 29, 54, 55],
+                               [36, 31, 39, 60, 33, 68, 37, 32, 65, 64]]]))
+
+
+

source

+
+
+

analyze_array

+
+
 analyze_array (o, bins=100, density=False, feature_names=None,
+                clip_outliers_plot=False, quantile_range=(25.0, 75.0),
+                percentiles=[1, 25, 50, 75, 99], text_len=12, figsize=(10,
+                6))
+
+
+

source

+
+
+

analyze_feature

+
+
 analyze_feature (feature, bins=100, density=False, feature_name=None,
+                  clip_outliers_plot=False, quantile_range=(25.0, 75.0),
+                  percentiles=[1, 25, 50, 75, 99], text_len=12,
+                  figsize=(10, 6))
+
+
+
x = np.random.normal(size=(1000))
+analyze_array(x)
+
+
 array shape: (1000,)
+       dtype: float64
+  nan values: 0.0%
+         max: 3.581094060980321
+           1: -2.1615590829115185
+          25: -0.5910961139851849
+          50: -0.002247946765973052
+          75: 0.6259274030927355
+          99: 2.3412961380708084
+         min: -2.9413736207935037
+ outlier min: -2.416631389602066
+ outlier max: 2.4514626787096163
+    outliers: 1.3%
+        mean: 0.0252125277963861
+         std: 0.946955486669799
+ normal dist: True
+
+
+
+
+

+
+
+
+
+
+
x1 = np.random.normal(size=(1000,2))
+x2 = np.random.normal(3, 5, size=(1000,2))
+x = x1 + x2
+analyze_array(x)
+
+
 array shape: (1000, 2)
+
+  0  feature: 0
+
+       dtype: float64
+  nan values: 0.0%
+         max: 20.323075761234193
+           1: -8.260661592413742
+          25: -0.6268118569038604
+          50: 2.7491159998190335
+          75: 6.1659732833324234
+          99: 15.387037197243288
+         min: -13.122296090020368
+ outlier min: -10.815989567258287
+ outlier max: 16.35515099368685
+    outliers: 0.9%
+        mean: 2.9347218553275445
+         std: 5.134940196769919
+ normal dist: True
+
+  1  feature: 1
+
+       dtype: float64
+  nan values: 0.0%
+         max: 19.86661808715871
+           1: -8.727124941895372
+          25: -0.45908489661153007
+          50: 2.875134866985423
+          75: 6.288434737224429
+          99: 14.424046274543118
+         min: -10.963913297285615
+ outlier min: -10.58036434736547
+ outlier max: 16.409714187978366
+    outliers: 0.6%
+        mean: 2.9552584127690014
+         std: 4.99683092772426
+ normal dist: True
+
+
+
+
+

+
+
+
+
+
+
+

+
+
+
+
+
+

source

+
+
+

get_relpath

+
+
 get_relpath (path)
+
+
+

source

+
+
+

to_root_path

+
+
 to_root_path (path)
+
+

Converts a path to an absolute path from the root directory of the repository.

+
+

source

+
+
+

get_root

+
+
 get_root ()
+
+

Returns the root directory of the git repository.

+
+

source

+
+
+

split_in_chunks

+
+
 split_in_chunks (o, chunksize, start=0, shuffle=False, drop_last=False)
+
+
+
a = np.arange(5, 15)
+test_eq(split_in_chunks(a, 3, drop_last=False), [array([5, 6, 7]), array([ 8,  9, 10]), array([11, 12, 13]), array([14])])
+test_eq(split_in_chunks(a, 3, drop_last=True), [array([5, 6, 7]), array([ 8,  9, 10]), array([11, 12, 13])])
+test_eq(split_in_chunks(a, 3, start=2, drop_last=True), [array([7, 8, 9]), array([10, 11, 12])])
+
+
+

source

+
+
+

load_object

+
+
 load_object (file_path)
+
+
+

source

+
+
+

save_object

+
+
 save_object (o, file_path, verbose=True)
+
+
+
split = np.arange(100)
+save_object(split, file_path='data/test')
+split2 = load_object('data/test.pkl')
+test_eq(split, split2)
+
+
data directory already exists.
+ndarray saved as data/test.pkl
+
+
+
+
splits = L([[[0,1,2,3,4], [5,6,7,8,9]],[[10,11,12,13,14], [15,16,17,18,19]]])
+save_object(splits, file_path=Path('data/test'))
+splits2 = load_object('data/test')
+test_eq(splits, splits2)
+
+
data directory already exists.
+L saved as data/test.pkl
+
+
+
+

source

+
+
+

get_idxs_to_keep

+
+
 get_idxs_to_keep (o, cond, crit='all', invert=False, axis=(1, 2),
+                   keepdims=False)
+
+
+
a = np.random.rand(100, 2, 10)
+a[a > .95] = np.nan
+idxs_to_keep = get_idxs_to_keep(a, np.isfinite)
+if idxs_to_keep.size>0:
+    test_eq(np.isnan(a[idxs_to_keep]).sum(), 0)
+
+
+

source

+
+
+

zerofy

+
+
 zerofy (a, stride, keep=False)
+
+

Creates copies of an array, setting individual/group values to zero

+
+
stride = 3
+a = np.arange(2*5).reshape(2,5) + 1
+
+zerofy(a, stride, keep=False)
+
+
array([[[ 0.,  0.,  3.,  4.,  5.],
+        [ 6.,  7.,  8.,  9., 10.]],
+
+       [[ 1.,  2.,  0.,  0.,  0.],
+        [ 6.,  7.,  8.,  9., 10.]],
+
+       [[ 1.,  2.,  3.,  4.,  5.],
+        [ 0.,  0.,  8.,  9., 10.]],
+
+       [[ 1.,  2.,  3.,  4.,  5.],
+        [ 6.,  7.,  0.,  0.,  0.]]])
+
+
+
+

source

+
+
+

feat2list

+
+
 feat2list (o)
+
+
+
a = 'a'
+test_eq(feat2list(a), ['a'])
+a = ['a', 'b']
+test_eq(feat2list(a), ['a', 'b'])
+a = None
+test_eq(feat2list(a), [])
+
+
+

source

+
+
+

smallest_dtype

+
+
 smallest_dtype (num, use_unsigned=False)
+
+

Find the smallest dtype that can safely hold num

+
+
test_eq(smallest_dtype(3654), 'int16')
+test_eq(smallest_dtype(2048.), 'float16')
+test_eq(smallest_dtype(365454), 'int32')
+test_eq(smallest_dtype(365454.), 'float32')
+test_eq(smallest_dtype(3654545134897), 'int64')
+
+
+

source

+
+
+

plot_forecast

+
+
 plot_forecast (X_true, y_true, y_pred, sel_vars=None, idx=None,
+                figsize=(8, 4), n_samples=1)
+
+
+

source

+
+
+

str2callable

+
+
 str2callable (object_path:str=None)
+
+

Transform a string into a callable object without importing it in the script.

|             | Type | Default | Details |
|-------------|------|---------|---------|
| object_path | str  | None    | The string representing the object path. |
+
+
# test showing you don't need to import the object in the script. The library needs to be installed though.
+try:
+    pyts
+except Exception as e:
+    print(0, e)
+try:
+    pyts.image
+except Exception as e:
+    print(1, e)
+try:
+    gasf = eval("pyts.image.GramianAngularField(method='summation')")
+    print(f"2 success: {gasf}")
+except Exception as e:
+    print(2, e)
+try:
+    gasf = str2callable("pyts.image.GramianAngularField(method='summation')")
+    print(f"3 success: {gasf}")
+except Exception as e:
+    print(3, e)
+
+
0 name 'pyts' is not defined
+1 name 'pyts' is not defined
+2 name 'pyts' is not defined
+3 success: GramianAngularField()
+
+

Weights & Biases Sweeps

+
+ + + +
+ + + + +
+ + + +
+ + + +
+

Weights & Biases Sweeps are used to automate hyperparameter optimization and explore the space of possible models.

+
+
+

source

+
+

wandb_agent

+
+
 wandb_agent (script_path, sweep, entity=None, project=None, count=None,
+              run=True)
+
+

Run a wandb agent with the given sweep and script_path

+
+
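A hedged sketch of how wandb_agent might be called. The sweep dict, script path, entity and project below are placeholders, and the call itself is commented out because it requires a wandb login and a real training script:

sweep = {'method': 'bayes',
         'metric': {'name': 'accuracy', 'goal': 'maximize'},
         'parameters': {'lr_max': {'values': [1e-3, 3e-3, 1e-2]}}}
# wandb_agent('wandb_scripts/train_script.py', sweep, entity='my_entity', project='my_project', count=5)
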

source

+
+
+

update_run_config

+
+
 update_run_config (config, new_config, verbose=False)
+
+

Update config with new_config

+
+
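A minimal sketch of update_run_config, assuming it returns config with the values in new_config applied:

config = dict(lr_max=1e-3, bs=64, n_epoch=20)
new_config = dict(bs=128)
updated = update_run_config(config, new_config, verbose=True)
print(updated)  # expected: {'lr_max': 0.001, 'bs': 128, 'n_epoch': 20} (assumption)
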

source

+
+
+

get_sweep_config

+
+
 get_sweep_config (config)
+
+

Get sweep config from config
