From 082815916900450485bd14cf1c7a83593e51825d Mon Sep 17 00:00:00 2001
From: ultrafunkamsterdam
Date: Sun, 14 Jul 2024 17:27:04 +0200
Subject: [PATCH] 0.34 - added host and port options to connect to an existing debuggable browser (over the network as well); updated docs
---
 docs/_build/html/.buildinfo | 4 + docs/_build/html/_modules/index.html | 360 + .../_modules/nodriver/cdp/accessibility.html | 1097 ++ .../html/_modules/nodriver/cdp/animation.html | 879 ++ .../html/_modules/nodriver/cdp/audits.html | 2388 ++++ .../html/_modules/nodriver/cdp/autofill.html | 627 + .../nodriver/cdp/background_service.html | 553 + .../html/_modules/nodriver/cdp/browser.html | 1145 ++ .../_modules/nodriver/cdp/cache_storage.html | 652 + .../html/_modules/nodriver/cdp/cast.html | 497 + .../html/_modules/nodriver/cdp/console.html | 426 + .../html/_modules/nodriver/cdp/css.html | 3060 ++++ .../html/_modules/nodriver/cdp/database.html | 504 + .../html/_modules/nodriver/cdp/debugger.html | 1989 +++ .../_modules/nodriver/cdp/device_access.html | 468 + .../nodriver/cdp/device_orientation.html | 353 + .../html/_modules/nodriver/cdp/dom.html | 2706 ++++ .../_modules/nodriver/cdp/dom_debugger.html | 669 + .../_modules/nodriver/cdp/dom_snapshot.html | 1484 ++ .../_modules/nodriver/cdp/dom_storage.html | 565 + .../html/_modules/nodriver/cdp/emulation.html | 1597 +++ .../nodriver/cdp/event_breakpoints.html | 367 + .../_modules/nodriver/cdp/extensions.html | 336 + .../html/_modules/nodriver/cdp/fed_cm.html | 638 + .../html/_modules/nodriver/cdp/fetch.html | 906 ++ .../nodriver/cdp/headless_experimental.html | 444 + .../_modules/nodriver/cdp/heap_profiler.html | 770 + .../_modules/nodriver/cdp/indexed_db.html | 905 ++ .../html/_modules/nodriver/cdp/input_.html | 1087 ++ .../html/_modules/nodriver/cdp/inspector.html | 388 + .../_build/html/_modules/nodriver/cdp/io.html | 420 + .../_modules/nodriver/cdp/layer_tree.html | 881 ++ .../html/_modules/nodriver/cdp/log.html | 541 + .../html/_modules/nodriver/cdp/media.html | 643 + .../html/_modules/nodriver/cdp/memory.html | 617 + .../html/_modules/nodriver/cdp/network.html | 5323 +++++++ .../html/_modules/nodriver/cdp/overlay.html | 2152 +++ .../html/_modules/nodriver/cdp/page.html | 4894 +++++++ .../_modules/nodriver/cdp/performance.html | 446 + .../nodriver/cdp/performance_timeline.html | 545 + .../html/_modules/nodriver/cdp/preload.html | 984 ++ .../html/_modules/nodriver/cdp/profiler.html | 814 ++ .../html/_modules/nodriver/cdp/pwa.html | 593 + .../html/_modules/nodriver/cdp/runtime.html | 2308 +++ .../html/_modules/nodriver/cdp/schema.html | 359 + .../html/_modules/nodriver/cdp/security.html | 922 ++ .../_modules/nodriver/cdp/service_worker.html | 794 ++ .../html/_modules/nodriver/cdp/storage.html | 2702 ++++ .../_modules/nodriver/cdp/system_info.html | 744 + .../html/_modules/nodriver/cdp/target.html | 1140 ++ .../html/_modules/nodriver/cdp/tethering.html | 370 + .../html/_modules/nodriver/cdp/tracing.html | 765 + .../html/_modules/nodriver/cdp/web_audio.html | 1038 ++ .../html/_modules/nodriver/cdp/web_authn.html | 950 ++ .../_modules/nodriver/core/_contradict.html | 427 + .../html/_modules/nodriver/core/browser.html | 1155 ++ .../html/_modules/nodriver/core/config.html | 626 + .../_modules/nodriver/core/connection.html | 918 + .../html/_modules/nodriver/core/element.html | 1548 ++ .../html/_modules/nodriver/core/tab.html | 1754 +++ docs/_build/html/_sources/index.rst.txt | 103 + .../_build/html/_sources/nodriver/cdp.rst.txt | 8 + .../nodriver/cdp/accessibility.rst.txt | 113 + 
.../_sources/nodriver/cdp/animation.rst.txt | 102 + .../html/_sources/nodriver/cdp/audits.rst.txt | 294 + .../_sources/nodriver/cdp/autofill.rst.txt | 87 + .../nodriver/cdp/background_service.rst.txt | 72 + .../_sources/nodriver/cdp/browser.rst.txt | 133 + .../nodriver/cdp/cache_storage.rst.txt | 75 + .../html/_sources/nodriver/cdp/cast.rst.txt | 67 + .../_sources/nodriver/cdp/console.rst.txt | 53 + .../html/_sources/nodriver/cdp/css.rst.txt | 321 + .../_sources/nodriver/cdp/database.rst.txt | 65 + .../_sources/nodriver/cdp/debugger.rst.txt | 187 + .../nodriver/cdp/device_access.rst.txt | 65 + .../nodriver/cdp/device_orientation.rst.txt | 36 + .../html/_sources/nodriver/cdp/dom.rst.txt | 300 + .../nodriver/cdp/dom_debugger.rst.txt | 71 + .../nodriver/cdp/dom_snapshot.rst.txt | 120 + .../_sources/nodriver/cdp/dom_storage.rst.txt | 86 + .../_sources/nodriver/cdp/emulation.rst.txt | 180 + .../nodriver/cdp/event_breakpoints.rst.txt | 42 + .../_sources/nodriver/cdp/extensions.rst.txt | 38 + .../html/_sources/nodriver/cdp/fed_cm.rst.txt | 88 + .../html/_sources/nodriver/cdp/fetch.rst.txt | 95 + .../cdp/headless_experimental.rst.txt | 48 + .../nodriver/cdp/heap_profiler.rst.txt | 106 + .../_sources/nodriver/cdp/indexed_db.rst.txt | 88 + .../html/_sources/nodriver/cdp/input_.rst.txt | 96 + .../_sources/nodriver/cdp/inspector.rst.txt | 53 + .../html/_sources/nodriver/cdp/io.rst.txt | 46 + .../_sources/nodriver/cdp/layer_tree.rst.txt | 100 + .../html/_sources/nodriver/cdp/log.rst.txt | 62 + .../html/_sources/nodriver/cdp/media.rst.txt | 103 + .../html/_sources/nodriver/cdp/memory.rst.txt | 75 + .../_sources/nodriver/cdp/network.rst.txt | 616 + .../_sources/nodriver/cdp/overlay.rst.txt | 217 + .../html/_sources/nodriver/cdp/page.rst.txt | 582 + .../_sources/nodriver/cdp/performance.rst.txt | 53 + .../nodriver/cdp/performance_timeline.rst.txt | 67 + .../_sources/nodriver/cdp/preload.rst.txt | 126 + .../_sources/nodriver/cdp/profiler.rst.txt | 98 + .../html/_sources/nodriver/cdp/pwa.rst.txt | 66 + .../_sources/nodriver/cdp/runtime.rst.txt | 242 + .../html/_sources/nodriver/cdp/schema.rst.txt | 42 + .../_sources/nodriver/cdp/security.rst.txt | 112 + .../nodriver/cdp/service_worker.rst.txt | 108 + .../_sources/nodriver/cdp/storage.rst.txt | 367 + .../_sources/nodriver/cdp/system_info.rst.txt | 88 + .../html/_sources/nodriver/cdp/target.rst.txt | 136 + .../_sources/nodriver/cdp/tethering.rst.txt | 45 + .../_sources/nodriver/cdp/tracing.rst.txt | 90 + .../_sources/nodriver/cdp/web_audio.rst.txt | 176 + .../_sources/nodriver/cdp/web_authn.rst.txt | 106 + .../_sources/nodriver/classes/browser.rst.txt | 42 + .../_sources/nodriver/classes/element.rst.txt | 14 + .../classes/others_and_helpers.rst.txt | 38 + .../_sources/nodriver/classes/tab.rst.txt | 13 + .../html/_sources/nodriver/quickstart.rst.txt | 262 + docs/_build/html/_sources/readme.rst.txt | 271 + docs/_build/html/_sources/style.rst.txt | 39 + docs/_build/html/_static/basic.css | 925 ++ docs/_build/html/_static/custom.css | 21 + docs/_build/html/_static/debug.css | 69 + docs/_build/html/_static/doctools.js | 156 + .../html/_static/documentation_options.js | 13 + docs/_build/html/_static/file.png | Bin 0 -> 286 bytes docs/_build/html/_static/language_data.js | 199 + docs/_build/html/_static/minus.png | Bin 0 -> 90 bytes docs/_build/html/_static/plus.png | Bin 0 -> 90 bytes docs/_build/html/_static/pygments.css | 249 + .../html/_static/scripts/furo-extensions.js | 0 docs/_build/html/_static/scripts/furo.js | 3 + 
.../html/_static/scripts/furo.js.LICENSE.txt | 7 + docs/_build/html/_static/scripts/furo.js.map | 1 + docs/_build/html/_static/searchtools.js | 574 + docs/_build/html/_static/skeleton.css | 296 + docs/_build/html/_static/sphinx_highlight.js | 154 + .../html/_static/styles/furo-extensions.css | 2 + .../_static/styles/furo-extensions.css.map | 1 + docs/_build/html/_static/styles/furo.css | 2 + docs/_build/html/_static/styles/furo.css.map | 1 + docs/_build/html/genindex.html | 11667 ++++++++++++++++ docs/_build/html/index.html | 7232 ++++++++++ docs/_build/html/nodriver/cdp.html | 380 + .../html/nodriver/cdp/accessibility.html | 1329 ++ docs/_build/html/nodriver/cdp/animation.html | 871 ++ docs/_build/html/nodriver/cdp/audits.html | 2941 ++++ docs/_build/html/nodriver/cdp/autofill.html | 667 + .../html/nodriver/cdp/background_service.html | 614 + docs/_build/html/nodriver/cdp/browser.html | 1232 ++ .../html/nodriver/cdp/cache_storage.html | 679 + docs/_build/html/nodriver/cdp/cast.html | 535 + docs/_build/html/nodriver/cdp/console.html | 485 + docs/_build/html/nodriver/cdp/css.html | 2584 ++++ docs/_build/html/nodriver/cdp/database.html | 529 + docs/_build/html/nodriver/cdp/debugger.html | 1757 +++ .../html/nodriver/cdp/device_access.html | 496 + .../html/nodriver/cdp/device_orientation.html | 403 + docs/_build/html/nodriver/cdp/dom.html | 2501 ++++ .../html/nodriver/cdp/dom_debugger.html | 689 + .../html/nodriver/cdp/dom_snapshot.html | 1262 ++ .../_build/html/nodriver/cdp/dom_storage.html | 607 + docs/_build/html/nodriver/cdp/emulation.html | 1411 ++ .../html/nodriver/cdp/event_breakpoints.html | 417 + docs/_build/html/nodriver/cdp/extensions.html | 395 + docs/_build/html/nodriver/cdp/fed_cm.html | 718 + docs/_build/html/nodriver/cdp/fetch.html | 865 ++ .../nodriver/cdp/headless_experimental.html | 467 + .../html/nodriver/cdp/heap_profiler.html | 748 + docs/_build/html/nodriver/cdp/indexed_db.html | 817 ++ docs/_build/html/nodriver/cdp/input_.html | 957 ++ docs/_build/html/nodriver/cdp/inspector.html | 430 + docs/_build/html/nodriver/cdp/io.html | 446 + docs/_build/html/nodriver/cdp/layer_tree.html | 812 ++ docs/_build/html/nodriver/cdp/log.html | 569 + docs/_build/html/nodriver/cdp/media.html | 662 + docs/_build/html/nodriver/cdp/memory.html | 638 + docs/_build/html/nodriver/cdp/network.html | 5692 ++++++++ docs/_build/html/nodriver/cdp/overlay.html | 1990 +++ docs/_build/html/nodriver/cdp/page.html | 5679 ++++++++ .../_build/html/nodriver/cdp/performance.html | 492 + .../nodriver/cdp/performance_timeline.html | 583 + docs/_build/html/nodriver/cdp/preload.html | 1489 ++ docs/_build/html/nodriver/cdp/profiler.html | 823 ++ docs/_build/html/nodriver/cdp/pwa.html | 609 + docs/_build/html/nodriver/cdp/runtime.html | 1820 +++ docs/_build/html/nodriver/cdp/schema.html | 415 + docs/_build/html/nodriver/cdp/security.html | 999 ++ .../html/nodriver/cdp/service_worker.html | 835 ++ docs/_build/html/nodriver/cdp/storage.html | 2900 ++++ .../_build/html/nodriver/cdp/system_info.html | 780 ++ docs/_build/html/nodriver/cdp/target.html | 1033 ++ docs/_build/html/nodriver/cdp/tethering.html | 430 + docs/_build/html/nodriver/cdp/tracing.html | 746 + docs/_build/html/nodriver/cdp/web_audio.html | 1094 ++ docs/_build/html/nodriver/cdp/web_authn.html | 921 ++ .../_build/html/nodriver/classes/browser.html | 583 + .../_build/html/nodriver/classes/element.html | 952 ++ .../nodriver/classes/others_and_helpers.html | 615 + docs/_build/html/nodriver/classes/tab.html | 1151 ++ 
docs/_build/html/nodriver/quickstart.html | 576 + docs/_build/html/objects.inv | Bin 0 -> 41414 bytes docs/_build/html/py-modindex.html | 638 + docs/_build/html/readme.html | 547 + docs/_build/html/search.html | 313 + docs/_build/html/searchindex.js | 1 + docs/_build/html/style.html | 379 + docs/_build/markdown/index.md | 5170 +++++++ docs/_build/markdown/nodriver/cdp.md | 54 + .../markdown/nodriver/cdp/accessibility.md | 471 + .../_build/markdown/nodriver/cdp/animation.md | 309 + docs/_build/markdown/nodriver/cdp/audits.md | 988 ++ docs/_build/markdown/nodriver/cdp/autofill.md | 182 + .../nodriver/cdp/background_service.md | 146 + docs/_build/markdown/nodriver/cdp/browser.md | 476 + .../markdown/nodriver/cdp/cache_storage.md | 185 + docs/_build/markdown/nodriver/cdp/cast.md | 117 + docs/_build/markdown/nodriver/cdp/console.md | 90 + docs/_build/markdown/nodriver/cdp/css.md | 1265 ++ docs/_build/markdown/nodriver/cdp/database.md | 107 + docs/_build/markdown/nodriver/cdp/debugger.md | 834 ++ .../markdown/nodriver/cdp/device_access.md | 92 + .../nodriver/cdp/device_orientation.md | 45 + docs/_build/markdown/nodriver/cdp/dom.md | 1226 ++ .../markdown/nodriver/cdp/dom_debugger.md | 209 + .../markdown/nodriver/cdp/dom_snapshot.md | 525 + .../markdown/nodriver/cdp/dom_storage.md | 130 + .../_build/markdown/nodriver/cdp/emulation.md | 587 + .../nodriver/cdp/event_breakpoints.md | 56 + .../markdown/nodriver/cdp/extensions.md | 44 + docs/_build/markdown/nodriver/cdp/fed_cm.md | 174 + docs/_build/markdown/nodriver/cdp/fetch.md | 322 + .../nodriver/cdp/headless_experimental.md | 89 + .../markdown/nodriver/cdp/heap_profiler.md | 209 + .../markdown/nodriver/cdp/indexed_db.md | 281 + docs/_build/markdown/nodriver/cdp/input_.md | 367 + .../_build/markdown/nodriver/cdp/inspector.md | 59 + docs/_build/markdown/nodriver/cdp/io.md | 71 + .../markdown/nodriver/cdp/layer_tree.md | 272 + docs/_build/markdown/nodriver/cdp/log.md | 136 + docs/_build/markdown/nodriver/cdp/media.md | 164 + docs/_build/markdown/nodriver/cdp/memory.md | 167 + docs/_build/markdown/nodriver/cdp/network.md | 2710 ++++ docs/_build/markdown/nodriver/cdp/overlay.md | 917 ++ docs/_build/markdown/nodriver/cdp/page.md | 2546 ++++ .../markdown/nodriver/cdp/performance.md | 97 + .../nodriver/cdp/performance_timeline.md | 125 + docs/_build/markdown/nodriver/cdp/preload.md | 454 + docs/_build/markdown/nodriver/cdp/profiler.md | 268 + docs/_build/markdown/nodriver/cdp/pwa.md | 169 + docs/_build/markdown/nodriver/cdp/runtime.md | 849 ++ docs/_build/markdown/nodriver/cdp/schema.md | 51 + docs/_build/markdown/nodriver/cdp/security.md | 363 + .../markdown/nodriver/cdp/service_worker.md | 217 + docs/_build/markdown/nodriver/cdp/storage.md | 1176 ++ .../markdown/nodriver/cdp/system_info.md | 245 + docs/_build/markdown/nodriver/cdp/target.md | 404 + .../_build/markdown/nodriver/cdp/tethering.md | 61 + docs/_build/markdown/nodriver/cdp/tracing.md | 226 + .../_build/markdown/nodriver/cdp/web_audio.md | 323 + .../_build/markdown/nodriver/cdp/web_authn.md | 318 + .../markdown/nodriver/classes/browser.md | 161 + .../markdown/nodriver/classes/element.md | 315 + .../nodriver/classes/others_and_helpers.md | 161 + docs/_build/markdown/nodriver/classes/tab.md | 515 + docs/_build/markdown/nodriver/quickstart.md | 241 + docs/_build/markdown/readme.md | 249 + docs/_build/markdown/style.md | 32 + 269 files changed, 189213 insertions(+) create mode 100644 docs/_build/html/.buildinfo create mode 100644 docs/_build/html/_modules/index.html create mode 100644 
docs/_build/html/_modules/nodriver/cdp/accessibility.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/animation.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/audits.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/autofill.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/background_service.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/browser.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/cache_storage.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/cast.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/console.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/css.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/database.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/debugger.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/device_access.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/device_orientation.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/dom.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/dom_debugger.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/dom_snapshot.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/dom_storage.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/emulation.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/event_breakpoints.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/extensions.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/fed_cm.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/fetch.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/headless_experimental.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/heap_profiler.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/indexed_db.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/input_.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/inspector.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/io.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/layer_tree.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/log.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/media.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/memory.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/network.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/overlay.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/page.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/performance.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/performance_timeline.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/preload.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/profiler.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/pwa.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/runtime.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/schema.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/security.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/service_worker.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/storage.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/system_info.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/target.html create mode 100644 
docs/_build/html/_modules/nodriver/cdp/tethering.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/tracing.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/web_audio.html create mode 100644 docs/_build/html/_modules/nodriver/cdp/web_authn.html create mode 100644 docs/_build/html/_modules/nodriver/core/_contradict.html create mode 100644 docs/_build/html/_modules/nodriver/core/browser.html create mode 100644 docs/_build/html/_modules/nodriver/core/config.html create mode 100644 docs/_build/html/_modules/nodriver/core/connection.html create mode 100644 docs/_build/html/_modules/nodriver/core/element.html create mode 100644 docs/_build/html/_modules/nodriver/core/tab.html create mode 100644 docs/_build/html/_sources/index.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/accessibility.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/animation.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/audits.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/autofill.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/background_service.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/browser.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/cache_storage.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/cast.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/console.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/css.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/database.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/debugger.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/device_access.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/device_orientation.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/dom.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/dom_debugger.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/dom_snapshot.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/dom_storage.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/emulation.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/event_breakpoints.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/extensions.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/fed_cm.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/fetch.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/headless_experimental.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/heap_profiler.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/indexed_db.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/input_.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/inspector.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/io.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/layer_tree.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/log.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/media.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/memory.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/network.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/overlay.rst.txt create mode 100644 
docs/_build/html/_sources/nodriver/cdp/page.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/performance.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/performance_timeline.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/preload.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/profiler.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/pwa.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/runtime.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/schema.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/security.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/service_worker.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/storage.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/system_info.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/target.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/tethering.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/tracing.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/web_audio.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/cdp/web_authn.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/classes/browser.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/classes/element.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/classes/others_and_helpers.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/classes/tab.rst.txt create mode 100644 docs/_build/html/_sources/nodriver/quickstart.rst.txt create mode 100644 docs/_build/html/_sources/readme.rst.txt create mode 100644 docs/_build/html/_sources/style.rst.txt create mode 100644 docs/_build/html/_static/basic.css create mode 100644 docs/_build/html/_static/custom.css create mode 100644 docs/_build/html/_static/debug.css create mode 100644 docs/_build/html/_static/doctools.js create mode 100644 docs/_build/html/_static/documentation_options.js create mode 100644 docs/_build/html/_static/file.png create mode 100644 docs/_build/html/_static/language_data.js create mode 100644 docs/_build/html/_static/minus.png create mode 100644 docs/_build/html/_static/plus.png create mode 100644 docs/_build/html/_static/pygments.css create mode 100644 docs/_build/html/_static/scripts/furo-extensions.js create mode 100644 docs/_build/html/_static/scripts/furo.js create mode 100644 docs/_build/html/_static/scripts/furo.js.LICENSE.txt create mode 100644 docs/_build/html/_static/scripts/furo.js.map create mode 100644 docs/_build/html/_static/searchtools.js create mode 100644 docs/_build/html/_static/skeleton.css create mode 100644 docs/_build/html/_static/sphinx_highlight.js create mode 100644 docs/_build/html/_static/styles/furo-extensions.css create mode 100644 docs/_build/html/_static/styles/furo-extensions.css.map create mode 100644 docs/_build/html/_static/styles/furo.css create mode 100644 docs/_build/html/_static/styles/furo.css.map create mode 100644 docs/_build/html/genindex.html create mode 100644 docs/_build/html/index.html create mode 100644 docs/_build/html/nodriver/cdp.html create mode 100644 docs/_build/html/nodriver/cdp/accessibility.html create mode 100644 docs/_build/html/nodriver/cdp/animation.html create mode 100644 docs/_build/html/nodriver/cdp/audits.html create mode 100644 docs/_build/html/nodriver/cdp/autofill.html create mode 100644 docs/_build/html/nodriver/cdp/background_service.html create mode 
100644 docs/_build/html/nodriver/cdp/browser.html create mode 100644 docs/_build/html/nodriver/cdp/cache_storage.html create mode 100644 docs/_build/html/nodriver/cdp/cast.html create mode 100644 docs/_build/html/nodriver/cdp/console.html create mode 100644 docs/_build/html/nodriver/cdp/css.html create mode 100644 docs/_build/html/nodriver/cdp/database.html create mode 100644 docs/_build/html/nodriver/cdp/debugger.html create mode 100644 docs/_build/html/nodriver/cdp/device_access.html create mode 100644 docs/_build/html/nodriver/cdp/device_orientation.html create mode 100644 docs/_build/html/nodriver/cdp/dom.html create mode 100644 docs/_build/html/nodriver/cdp/dom_debugger.html create mode 100644 docs/_build/html/nodriver/cdp/dom_snapshot.html create mode 100644 docs/_build/html/nodriver/cdp/dom_storage.html create mode 100644 docs/_build/html/nodriver/cdp/emulation.html create mode 100644 docs/_build/html/nodriver/cdp/event_breakpoints.html create mode 100644 docs/_build/html/nodriver/cdp/extensions.html create mode 100644 docs/_build/html/nodriver/cdp/fed_cm.html create mode 100644 docs/_build/html/nodriver/cdp/fetch.html create mode 100644 docs/_build/html/nodriver/cdp/headless_experimental.html create mode 100644 docs/_build/html/nodriver/cdp/heap_profiler.html create mode 100644 docs/_build/html/nodriver/cdp/indexed_db.html create mode 100644 docs/_build/html/nodriver/cdp/input_.html create mode 100644 docs/_build/html/nodriver/cdp/inspector.html create mode 100644 docs/_build/html/nodriver/cdp/io.html create mode 100644 docs/_build/html/nodriver/cdp/layer_tree.html create mode 100644 docs/_build/html/nodriver/cdp/log.html create mode 100644 docs/_build/html/nodriver/cdp/media.html create mode 100644 docs/_build/html/nodriver/cdp/memory.html create mode 100644 docs/_build/html/nodriver/cdp/network.html create mode 100644 docs/_build/html/nodriver/cdp/overlay.html create mode 100644 docs/_build/html/nodriver/cdp/page.html create mode 100644 docs/_build/html/nodriver/cdp/performance.html create mode 100644 docs/_build/html/nodriver/cdp/performance_timeline.html create mode 100644 docs/_build/html/nodriver/cdp/preload.html create mode 100644 docs/_build/html/nodriver/cdp/profiler.html create mode 100644 docs/_build/html/nodriver/cdp/pwa.html create mode 100644 docs/_build/html/nodriver/cdp/runtime.html create mode 100644 docs/_build/html/nodriver/cdp/schema.html create mode 100644 docs/_build/html/nodriver/cdp/security.html create mode 100644 docs/_build/html/nodriver/cdp/service_worker.html create mode 100644 docs/_build/html/nodriver/cdp/storage.html create mode 100644 docs/_build/html/nodriver/cdp/system_info.html create mode 100644 docs/_build/html/nodriver/cdp/target.html create mode 100644 docs/_build/html/nodriver/cdp/tethering.html create mode 100644 docs/_build/html/nodriver/cdp/tracing.html create mode 100644 docs/_build/html/nodriver/cdp/web_audio.html create mode 100644 docs/_build/html/nodriver/cdp/web_authn.html create mode 100644 docs/_build/html/nodriver/classes/browser.html create mode 100644 docs/_build/html/nodriver/classes/element.html create mode 100644 docs/_build/html/nodriver/classes/others_and_helpers.html create mode 100644 docs/_build/html/nodriver/classes/tab.html create mode 100644 docs/_build/html/nodriver/quickstart.html create mode 100644 docs/_build/html/objects.inv create mode 100644 docs/_build/html/py-modindex.html create mode 100644 docs/_build/html/readme.html create mode 100644 docs/_build/html/search.html create mode 100644 
docs/_build/html/searchindex.js create mode 100644 docs/_build/html/style.html create mode 100644 docs/_build/markdown/index.md create mode 100644 docs/_build/markdown/nodriver/cdp.md create mode 100644 docs/_build/markdown/nodriver/cdp/accessibility.md create mode 100644 docs/_build/markdown/nodriver/cdp/animation.md create mode 100644 docs/_build/markdown/nodriver/cdp/audits.md create mode 100644 docs/_build/markdown/nodriver/cdp/autofill.md create mode 100644 docs/_build/markdown/nodriver/cdp/background_service.md create mode 100644 docs/_build/markdown/nodriver/cdp/browser.md create mode 100644 docs/_build/markdown/nodriver/cdp/cache_storage.md create mode 100644 docs/_build/markdown/nodriver/cdp/cast.md create mode 100644 docs/_build/markdown/nodriver/cdp/console.md create mode 100644 docs/_build/markdown/nodriver/cdp/css.md create mode 100644 docs/_build/markdown/nodriver/cdp/database.md create mode 100644 docs/_build/markdown/nodriver/cdp/debugger.md create mode 100644 docs/_build/markdown/nodriver/cdp/device_access.md create mode 100644 docs/_build/markdown/nodriver/cdp/device_orientation.md create mode 100644 docs/_build/markdown/nodriver/cdp/dom.md create mode 100644 docs/_build/markdown/nodriver/cdp/dom_debugger.md create mode 100644 docs/_build/markdown/nodriver/cdp/dom_snapshot.md create mode 100644 docs/_build/markdown/nodriver/cdp/dom_storage.md create mode 100644 docs/_build/markdown/nodriver/cdp/emulation.md create mode 100644 docs/_build/markdown/nodriver/cdp/event_breakpoints.md create mode 100644 docs/_build/markdown/nodriver/cdp/extensions.md create mode 100644 docs/_build/markdown/nodriver/cdp/fed_cm.md create mode 100644 docs/_build/markdown/nodriver/cdp/fetch.md create mode 100644 docs/_build/markdown/nodriver/cdp/headless_experimental.md create mode 100644 docs/_build/markdown/nodriver/cdp/heap_profiler.md create mode 100644 docs/_build/markdown/nodriver/cdp/indexed_db.md create mode 100644 docs/_build/markdown/nodriver/cdp/input_.md create mode 100644 docs/_build/markdown/nodriver/cdp/inspector.md create mode 100644 docs/_build/markdown/nodriver/cdp/io.md create mode 100644 docs/_build/markdown/nodriver/cdp/layer_tree.md create mode 100644 docs/_build/markdown/nodriver/cdp/log.md create mode 100644 docs/_build/markdown/nodriver/cdp/media.md create mode 100644 docs/_build/markdown/nodriver/cdp/memory.md create mode 100644 docs/_build/markdown/nodriver/cdp/network.md create mode 100644 docs/_build/markdown/nodriver/cdp/overlay.md create mode 100644 docs/_build/markdown/nodriver/cdp/page.md create mode 100644 docs/_build/markdown/nodriver/cdp/performance.md create mode 100644 docs/_build/markdown/nodriver/cdp/performance_timeline.md create mode 100644 docs/_build/markdown/nodriver/cdp/preload.md create mode 100644 docs/_build/markdown/nodriver/cdp/profiler.md create mode 100644 docs/_build/markdown/nodriver/cdp/pwa.md create mode 100644 docs/_build/markdown/nodriver/cdp/runtime.md create mode 100644 docs/_build/markdown/nodriver/cdp/schema.md create mode 100644 docs/_build/markdown/nodriver/cdp/security.md create mode 100644 docs/_build/markdown/nodriver/cdp/service_worker.md create mode 100644 docs/_build/markdown/nodriver/cdp/storage.md create mode 100644 docs/_build/markdown/nodriver/cdp/system_info.md create mode 100644 docs/_build/markdown/nodriver/cdp/target.md create mode 100644 docs/_build/markdown/nodriver/cdp/tethering.md create mode 100644 docs/_build/markdown/nodriver/cdp/tracing.md create mode 100644 docs/_build/markdown/nodriver/cdp/web_audio.md create 
mode 100644 docs/_build/markdown/nodriver/cdp/web_authn.md create mode 100644 docs/_build/markdown/nodriver/classes/browser.md create mode 100644 docs/_build/markdown/nodriver/classes/element.md create mode 100644 docs/_build/markdown/nodriver/classes/others_and_helpers.md create mode 100644 docs/_build/markdown/nodriver/classes/tab.md create mode 100644 docs/_build/markdown/nodriver/quickstart.md create mode 100644 docs/_build/markdown/readme.md create mode 100644 docs/_build/markdown/style.md
diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo new file mode 100644 index 0000000..2281322 --- /dev/null +++ b/docs/_build/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 289d95cc9169fcb0c76a899cfdf2c08c +tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html new file mode 100644 index 0000000..9c71737 --- /dev/null +++ b/docs/_build/html/_modules/index.html @@ -0,0 +1,360 @@
+ Overview: module code - nodriver documentation

All modules for which code is available

\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/accessibility.html b/docs/_build/html/_modules/nodriver/cdp/accessibility.html new file mode 100644 index 0000000..d4309b4 --- /dev/null +++ b/docs/_build/html/_modules/nodriver/cdp/accessibility.html @@ -0,0 +1,1097 @@
+ nodriver.cdp.accessibility - nodriver documentation

Source code for nodriver.cdp.accessibility

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Accessibility (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import page
+from . import runtime
+
+
+
+[docs] +class AXNodeId(str): + """ + Unique accessibility node identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> AXNodeId: + return cls(json) + + def __repr__(self): + return "AXNodeId({})".format(super().__repr__())
+ + + +
+[docs] +class AXValueType(enum.Enum): + """ + Enum of possible property types. + """ + + BOOLEAN = "boolean" + TRISTATE = "tristate" + BOOLEAN_OR_UNDEFINED = "booleanOrUndefined" + IDREF = "idref" + IDREF_LIST = "idrefList" + INTEGER = "integer" + NODE = "node" + NODE_LIST = "nodeList" + NUMBER = "number" + STRING = "string" + COMPUTED_STRING = "computedString" + TOKEN = "token" + TOKEN_LIST = "tokenList" + DOM_RELATION = "domRelation" + ROLE = "role" + INTERNAL_ROLE = "internalRole" + VALUE_UNDEFINED = "valueUndefined" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AXValueType: + return cls(json)
+ + + +
+[docs] +class AXValueSourceType(enum.Enum): + """ + Enum of possible property sources. + """ + + ATTRIBUTE = "attribute" + IMPLICIT = "implicit" + STYLE = "style" + CONTENTS = "contents" + PLACEHOLDER = "placeholder" + RELATED_ELEMENT = "relatedElement" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AXValueSourceType: + return cls(json)
+ + + +
+[docs] +class AXValueNativeSourceType(enum.Enum): + """ + Enum of possible native property sources (as a subtype of a particular AXValueSourceType). + """ + + DESCRIPTION = "description" + FIGCAPTION = "figcaption" + LABEL = "label" + LABELFOR = "labelfor" + LABELWRAPPED = "labelwrapped" + LEGEND = "legend" + RUBYANNOTATION = "rubyannotation" + TABLECAPTION = "tablecaption" + TITLE = "title" + OTHER = "other" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AXValueNativeSourceType: + return cls(json)
+ + + +
+[docs] +@dataclass +class AXValueSource: + """ + A single source for a computed AX property. + """ + + #: What type of source this is. + type_: AXValueSourceType + + #: The value of this property source. + value: typing.Optional[AXValue] = None + + #: The name of the relevant attribute, if any. + attribute: typing.Optional[str] = None + + #: The value of the relevant attribute, if any. + attribute_value: typing.Optional[AXValue] = None + + #: Whether this source is superseded by a higher priority source. + superseded: typing.Optional[bool] = None + + #: The native markup source for this value, e.g. a ``<label>`` element. + native_source: typing.Optional[AXValueNativeSourceType] = None + + #: The value, such as a node or node list, of the native source. + native_source_value: typing.Optional[AXValue] = None + + #: Whether the value for this property is invalid. + invalid: typing.Optional[bool] = None + + #: Reason for the value being invalid, if it is. + invalid_reason: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_.to_json() + if self.value is not None: + json["value"] = self.value.to_json() + if self.attribute is not None: + json["attribute"] = self.attribute + if self.attribute_value is not None: + json["attributeValue"] = self.attribute_value.to_json() + if self.superseded is not None: + json["superseded"] = self.superseded + if self.native_source is not None: + json["nativeSource"] = self.native_source.to_json() + if self.native_source_value is not None: + json["nativeSourceValue"] = self.native_source_value.to_json() + if self.invalid is not None: + json["invalid"] = self.invalid + if self.invalid_reason is not None: + json["invalidReason"] = self.invalid_reason + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AXValueSource: + return cls( + type_=AXValueSourceType.from_json(json["type"]), + value=( + AXValue.from_json(json["value"]) + if json.get("value", None) is not None + else None + ), + attribute=( + str(json["attribute"]) + if json.get("attribute", None) is not None + else None + ), + attribute_value=( + AXValue.from_json(json["attributeValue"]) + if json.get("attributeValue", None) is not None + else None + ), + superseded=( + bool(json["superseded"]) + if json.get("superseded", None) is not None + else None + ), + native_source=( + AXValueNativeSourceType.from_json(json["nativeSource"]) + if json.get("nativeSource", None) is not None + else None + ), + native_source_value=( + AXValue.from_json(json["nativeSourceValue"]) + if json.get("nativeSourceValue", None) is not None + else None + ), + invalid=( + bool(json["invalid"]) if json.get("invalid", None) is not None else None + ), + invalid_reason=( + str(json["invalidReason"]) + if json.get("invalidReason", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class AXRelatedNode: + #: The BackendNodeId of the related DOM node. + backend_dom_node_id: dom.BackendNodeId + + #: The IDRef value provided, if any. + idref: typing.Optional[str] = None + + #: The text alternative of this node in the current context. + text: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["backendDOMNodeId"] = self.backend_dom_node_id.to_json() + if self.idref is not None: + json["idref"] = self.idref + if self.text is not None: + json["text"] = self.text + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AXRelatedNode: + return cls( + backend_dom_node_id=dom.BackendNodeId.from_json(json["backendDOMNodeId"]), + idref=str(json["idref"]) if json.get("idref", None) is not None else None, + text=str(json["text"]) if json.get("text", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class AXProperty: + #: The name of this property. + name: AXPropertyName + + #: The value of this property. + value: AXValue + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name.to_json() + json["value"] = self.value.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AXProperty: + return cls( + name=AXPropertyName.from_json(json["name"]), + value=AXValue.from_json(json["value"]), + )
+ + + +
+[docs] +@dataclass +class AXValue: + """ + A single computed AX property. + """ + + #: The type of this value. + type_: AXValueType + + #: The computed value of this property. + value: typing.Optional[typing.Any] = None + + #: One or more related nodes, if applicable. + related_nodes: typing.Optional[typing.List[AXRelatedNode]] = None + + #: The sources which contributed to the computation of this property. + sources: typing.Optional[typing.List[AXValueSource]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_.to_json() + if self.value is not None: + json["value"] = self.value + if self.related_nodes is not None: + json["relatedNodes"] = [i.to_json() for i in self.related_nodes] + if self.sources is not None: + json["sources"] = [i.to_json() for i in self.sources] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AXValue: + return cls( + type_=AXValueType.from_json(json["type"]), + value=json["value"] if json.get("value", None) is not None else None, + related_nodes=( + [AXRelatedNode.from_json(i) for i in json["relatedNodes"]] + if json.get("relatedNodes", None) is not None + else None + ), + sources=( + [AXValueSource.from_json(i) for i in json["sources"]] + if json.get("sources", None) is not None + else None + ), + )
+ + + +
+[docs] +class AXPropertyName(enum.Enum): + """ + Values of AXProperty name: + - from 'busy' to 'roledescription': states which apply to every AX node + - from 'live' to 'root': attributes which apply to nodes in live regions + - from 'autocomplete' to 'valuetext': attributes which apply to widgets + - from 'checked' to 'selected': states which apply to widgets + - from 'activedescendant' to 'owns' - relationships between elements other than parent/child/sibling. + """ + + BUSY = "busy" + DISABLED = "disabled" + EDITABLE = "editable" + FOCUSABLE = "focusable" + FOCUSED = "focused" + HIDDEN = "hidden" + HIDDEN_ROOT = "hiddenRoot" + INVALID = "invalid" + KEYSHORTCUTS = "keyshortcuts" + SETTABLE = "settable" + ROLEDESCRIPTION = "roledescription" + LIVE = "live" + ATOMIC = "atomic" + RELEVANT = "relevant" + ROOT = "root" + AUTOCOMPLETE = "autocomplete" + HAS_POPUP = "hasPopup" + LEVEL = "level" + MULTISELECTABLE = "multiselectable" + ORIENTATION = "orientation" + MULTILINE = "multiline" + READONLY = "readonly" + REQUIRED = "required" + VALUEMIN = "valuemin" + VALUEMAX = "valuemax" + VALUETEXT = "valuetext" + CHECKED = "checked" + EXPANDED = "expanded" + MODAL = "modal" + PRESSED = "pressed" + SELECTED = "selected" + ACTIVEDESCENDANT = "activedescendant" + CONTROLS = "controls" + DESCRIBEDBY = "describedby" + DETAILS = "details" + ERRORMESSAGE = "errormessage" + FLOWTO = "flowto" + LABELLEDBY = "labelledby" + OWNS = "owns" + URL = "url" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AXPropertyName: + return cls(json)
+ + + +
+[docs] +@dataclass +class AXNode: + """ + A node in the accessibility tree. + """ + + #: Unique identifier for this node. + node_id: AXNodeId + + #: Whether this node is ignored for accessibility + ignored: bool + + #: Collection of reasons why this node is hidden. + ignored_reasons: typing.Optional[typing.List[AXProperty]] = None + + #: This ``Node``'s role, whether explicit or implicit. + role: typing.Optional[AXValue] = None + + #: This ``Node``'s Chrome raw role. + chrome_role: typing.Optional[AXValue] = None + + #: The accessible name for this ``Node``. + name: typing.Optional[AXValue] = None + + #: The accessible description for this ``Node``. + description: typing.Optional[AXValue] = None + + #: The value for this ``Node``. + value: typing.Optional[AXValue] = None + + #: All other properties + properties: typing.Optional[typing.List[AXProperty]] = None + + #: ID for this node's parent. + parent_id: typing.Optional[AXNodeId] = None + + #: IDs for each of this node's child nodes. + child_ids: typing.Optional[typing.List[AXNodeId]] = None + + #: The backend ID for the associated DOM node, if any. + backend_dom_node_id: typing.Optional[dom.BackendNodeId] = None + + #: The frame ID for the frame associated with this nodes document. + frame_id: typing.Optional[page.FrameId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["nodeId"] = self.node_id.to_json() + json["ignored"] = self.ignored + if self.ignored_reasons is not None: + json["ignoredReasons"] = [i.to_json() for i in self.ignored_reasons] + if self.role is not None: + json["role"] = self.role.to_json() + if self.chrome_role is not None: + json["chromeRole"] = self.chrome_role.to_json() + if self.name is not None: + json["name"] = self.name.to_json() + if self.description is not None: + json["description"] = self.description.to_json() + if self.value is not None: + json["value"] = self.value.to_json() + if self.properties is not None: + json["properties"] = [i.to_json() for i in self.properties] + if self.parent_id is not None: + json["parentId"] = self.parent_id.to_json() + if self.child_ids is not None: + json["childIds"] = [i.to_json() for i in self.child_ids] + if self.backend_dom_node_id is not None: + json["backendDOMNodeId"] = self.backend_dom_node_id.to_json() + if self.frame_id is not None: + json["frameId"] = self.frame_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AXNode: + return cls( + node_id=AXNodeId.from_json(json["nodeId"]), + ignored=bool(json["ignored"]), + ignored_reasons=( + [AXProperty.from_json(i) for i in json["ignoredReasons"]] + if json.get("ignoredReasons", None) is not None + else None + ), + role=( + AXValue.from_json(json["role"]) + if json.get("role", None) is not None + else None + ), + chrome_role=( + AXValue.from_json(json["chromeRole"]) + if json.get("chromeRole", None) is not None + else None + ), + name=( + AXValue.from_json(json["name"]) + if json.get("name", None) is not None + else None + ), + description=( + AXValue.from_json(json["description"]) + if json.get("description", None) is not None + else None + ), + value=( + AXValue.from_json(json["value"]) + if json.get("value", None) is not None + else None + ), + properties=( + [AXProperty.from_json(i) for i in json["properties"]] + if json.get("properties", None) is not None + else None + ), + parent_id=( + AXNodeId.from_json(json["parentId"]) + if json.get("parentId", None) is not None + else None + ), + child_ids=( + [AXNodeId.from_json(i) for i in json["childIds"]] 
+ if json.get("childIds", None) is not None + else None + ), + backend_dom_node_id=( + dom.BackendNodeId.from_json(json["backendDOMNodeId"]) + if json.get("backendDOMNodeId", None) is not None + else None + ), + frame_id=( + page.FrameId.from_json(json["frameId"]) + if json.get("frameId", None) is not None + else None + ), + )
+ + + +
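# Illustrative example (not emitted by the CDP generator): every type above
# follows the same JSON round-trip contract, so a raw CDP payload can be
# parsed and re-serialized losslessly:
#
#     raw = {"nodeId": "7", "ignored": False,
#            "name": {"type": "computedString", "value": "Submit"}}
#     node = AXNode.from_json(raw)
#     assert node.name.value == "Submit"   # nested AXValue parsed as well
#     assert node.to_json() == raw         # camelCase keys restored on output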
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables the accessibility domain. + """ + cmd_dict: T_JSON_DICT = { + "method": "Accessibility.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables the accessibility domain which causes ``AXNodeId``'s to remain consistent between method calls. + This turns on accessibility for the page, which can impact performance until accessibility is disabled. + """ + cmd_dict: T_JSON_DICT = { + "method": "Accessibility.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_partial_ax_tree( + node_id: typing.Optional[dom.NodeId] = None, + backend_node_id: typing.Optional[dom.BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, + fetch_relatives: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[AXNode]]: + """ + Fetches the accessibility node and partial accessibility tree for this DOM node, if it exists. + + **EXPERIMENTAL** + + :param node_id: *(Optional)* Identifier of the node to get the partial accessibility tree for. + :param backend_node_id: *(Optional)* Identifier of the backend node to get the partial accessibility tree for. + :param object_id: *(Optional)* JavaScript object id of the node wrapper to get the partial accessibility tree for. + :param fetch_relatives: *(Optional)* Whether to fetch this node's ancestors, siblings and children. Defaults to true. + :returns: The ``Accessibility.AXNode`` for this DOM node, if it exists, plus its ancestors, siblings and children, if requested. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + if fetch_relatives is not None: + params["fetchRelatives"] = fetch_relatives + cmd_dict: T_JSON_DICT = { + "method": "Accessibility.getPartialAXTree", + "params": params, + } + json = yield cmd_dict + return [AXNode.from_json(i) for i in json["nodes"]]
+ + + +
+[docs] +def get_full_ax_tree( + depth: typing.Optional[int] = None, frame_id: typing.Optional[page.FrameId] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[AXNode]]: + """ + Fetches the entire accessibility tree for the root Document + + **EXPERIMENTAL** + + :param depth: *(Optional)* The maximum depth at which descendants of the root node should be retrieved. If omitted, the full tree is returned. + :param frame_id: *(Optional)* The frame for whose document the AX tree should be retrieved. If omitted, the root frame is used. + :returns: + """ + params: T_JSON_DICT = dict() + if depth is not None: + params["depth"] = depth + if frame_id is not None: + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Accessibility.getFullAXTree", + "params": params, + } + json = yield cmd_dict + return [AXNode.from_json(i) for i in json["nodes"]]
+ + + +
+[docs] +def get_root_ax_node( + frame_id: typing.Optional[page.FrameId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, AXNode]: + """ + Fetches the root node. + Requires ``enable()`` to have been called previously. + + **EXPERIMENTAL** + + :param frame_id: *(Optional)* The frame in whose document the node resides. If omitted, the root frame is used. + :returns: + """ + params: T_JSON_DICT = dict() + if frame_id is not None: + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Accessibility.getRootAXNode", + "params": params, + } + json = yield cmd_dict + return AXNode.from_json(json["node"])
+ + + +
+[docs] +def get_ax_node_and_ancestors( + node_id: typing.Optional[dom.NodeId] = None, + backend_node_id: typing.Optional[dom.BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[AXNode]]: + """ + Fetches a node and all ancestors up to and including the root. + Requires ``enable()`` to have been called previously. + + **EXPERIMENTAL** + + :param node_id: *(Optional)* Identifier of the node to get. + :param backend_node_id: *(Optional)* Identifier of the backend node to get. + :param object_id: *(Optional)* JavaScript object id of the node wrapper to get. + :returns: + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Accessibility.getAXNodeAndAncestors", + "params": params, + } + json = yield cmd_dict + return [AXNode.from_json(i) for i in json["nodes"]]
+ + + +
+[docs] +def get_child_ax_nodes( + id_: AXNodeId, frame_id: typing.Optional[page.FrameId] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[AXNode]]: + """ + Fetches a particular accessibility node by AXNodeId. + Requires ``enable()`` to have been called previously. + + **EXPERIMENTAL** + + :param id_: + :param frame_id: *(Optional)* The frame in whose document the node resides. If omitted, the root frame is used. + :returns: + """ + params: T_JSON_DICT = dict() + params["id"] = id_.to_json() + if frame_id is not None: + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Accessibility.getChildAXNodes", + "params": params, + } + json = yield cmd_dict + return [AXNode.from_json(i) for i in json["nodes"]]
+ + + +
+[docs] +def query_ax_tree( + node_id: typing.Optional[dom.NodeId] = None, + backend_node_id: typing.Optional[dom.BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, + accessible_name: typing.Optional[str] = None, + role: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[AXNode]]: + """ + Query a DOM node's accessibility subtree for accessible name and role. + This command computes the name and role for all nodes in the subtree, including those that are + ignored for accessibility, and returns those that match the specified name and role. If no DOM + node is specified, or the DOM node does not exist, the command returns an error. If neither + ``accessibleName`` or ``role`` is specified, it returns all the accessibility nodes in the subtree. + + **EXPERIMENTAL** + + :param node_id: *(Optional)* Identifier of the node for the root to query. + :param backend_node_id: *(Optional)* Identifier of the backend node for the root to query. + :param object_id: *(Optional)* JavaScript object id of the node wrapper for the root to query. + :param accessible_name: *(Optional)* Find nodes with this computed name. + :param role: *(Optional)* Find nodes with this computed role. + :returns: A list of ``Accessibility.AXNode`` matching the specified attributes, including nodes that are ignored for accessibility. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + if accessible_name is not None: + params["accessibleName"] = accessible_name + if role is not None: + params["role"] = role + cmd_dict: T_JSON_DICT = { + "method": "Accessibility.queryAXTree", + "params": params, + } + json = yield cmd_dict + return [AXNode.from_json(i) for i in json["nodes"]]
+ + + +
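# Illustrative example (assumptions noted below): the command functions above
# are plain generators that yield a single CDP message and return the parsed
# result, so any transport can drive them:
#
#     gen = get_full_ax_tree(depth=2)
#     request = gen.send(None)   # {'method': 'Accessibility.getFullAXTree',
#                                #  'params': {'depth': 2}}
#     # ...send `request` over the devtools websocket, then feed the reply back
#     # with gen.send(reply); the generator finishes with a StopIteration whose
#     # .value is the parsed List[AXNode].
#
# With nodriver this plumbing is normally hidden behind the connection object,
# e.g. (assuming a connected Tab named `tab`):
#
#     nodes = await tab.send(get_full_ax_tree(depth=2))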
+[docs] +@event_class("Accessibility.loadComplete") +@dataclass +class LoadComplete: + """ + **EXPERIMENTAL** + + The loadComplete event mirrors the load complete event sent by the browser to assistive + technology when the web page has finished loading. + """ + + #: New document root node. + root: AXNode + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LoadComplete: + return cls(root=AXNode.from_json(json["root"]))
+ + + +
+[docs] +@event_class("Accessibility.nodesUpdated") +@dataclass +class NodesUpdated: + """ + **EXPERIMENTAL** + + The nodesUpdated event is sent every time a previously requested node has changed the in tree. + """ + + #: Updated node data. + nodes: typing.List[AXNode] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> NodesUpdated: + return cls(nodes=[AXNode.from_json(i) for i in json["nodes"]])
+ +
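# Illustrative example (assuming nodriver's Tab.add_handler API): the
# @event_class dataclasses above are what event listeners receive once the
# Accessibility domain is enabled:
#
#     async def on_load_complete(ev: LoadComplete):
#         print("new AX root:", ev.root.node_id)
#
#     tab.add_handler(LoadComplete, on_load_complete)
#     await tab.send(enable())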
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/animation.html b/docs/_build/html/_modules/nodriver/cdp/animation.html new file mode 100644 index 0000000..00ddd2a --- /dev/null +++ b/docs/_build/html/_modules/nodriver/cdp/animation.html @@ -0,0 +1,879 @@
+ nodriver.cdp.animation - nodriver documentation

Source code for nodriver.cdp.animation

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Animation (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import runtime
+
+
+
+[docs] +@dataclass +class Animation: + """ + Animation instance. + """ + + #: ``Animation``'s id. + id_: str + + #: ``Animation``'s name. + name: str + + #: ``Animation``'s internal paused state. + paused_state: bool + + #: ``Animation``'s play state. + play_state: str + + #: ``Animation``'s playback rate. + playback_rate: float + + #: ``Animation``'s start time. + #: Milliseconds for time based animations and + #: percentage [0 - 100] for scroll driven animations + #: (i.e. when viewOrScrollTimeline exists). + start_time: float + + #: ``Animation``'s current time. + current_time: float + + #: Animation type of ``Animation``. + type_: str + + #: ``Animation``'s source animation node. + source: typing.Optional[AnimationEffect] = None + + #: A unique ID for ``Animation`` representing the sources that triggered this CSS + #: animation/transition. + css_id: typing.Optional[str] = None + + #: View or scroll timeline + view_or_scroll_timeline: typing.Optional[ViewOrScrollTimeline] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_ + json["name"] = self.name + json["pausedState"] = self.paused_state + json["playState"] = self.play_state + json["playbackRate"] = self.playback_rate + json["startTime"] = self.start_time + json["currentTime"] = self.current_time + json["type"] = self.type_ + if self.source is not None: + json["source"] = self.source.to_json() + if self.css_id is not None: + json["cssId"] = self.css_id + if self.view_or_scroll_timeline is not None: + json["viewOrScrollTimeline"] = self.view_or_scroll_timeline.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Animation: + return cls( + id_=str(json["id"]), + name=str(json["name"]), + paused_state=bool(json["pausedState"]), + play_state=str(json["playState"]), + playback_rate=float(json["playbackRate"]), + start_time=float(json["startTime"]), + current_time=float(json["currentTime"]), + type_=str(json["type"]), + source=( + AnimationEffect.from_json(json["source"]) + if json.get("source", None) is not None + else None + ), + css_id=str(json["cssId"]) if json.get("cssId", None) is not None else None, + view_or_scroll_timeline=( + ViewOrScrollTimeline.from_json(json["viewOrScrollTimeline"]) + if json.get("viewOrScrollTimeline", None) is not None + else None + ), + )
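All of these generated dataclasses follow the same convention: camelCase JSON from the wire maps to snake_case attributes, names that would shadow builtins get a trailing underscore (``id_``, ``type_``), and optional fields are omitted from ``to_json()`` when ``None``. A tiny round-trip illustration with made-up values:

    from nodriver import cdp

    raw = {
        "id": "anim-1", "name": "fade", "pausedState": False, "playState": "running",
        "playbackRate": 1.0, "startTime": 0.0, "currentTime": 250.0, "type": "CSSAnimation",
    }
    anim = cdp.animation.Animation.from_json(raw)
    assert anim.id_ == "anim-1" and anim.type_ == "CSSAnimation"
    # Optional source/cssId/viewOrScrollTimeline are None, so they are not emitted.
    assert anim.to_json() == raw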
+ + + +
+[docs] +@dataclass +class ViewOrScrollTimeline: + """ + Timeline instance + """ + + #: Orientation of the scroll + axis: dom.ScrollOrientation + + #: Scroll container node + source_node_id: typing.Optional[dom.BackendNodeId] = None + + #: Represents the starting scroll position of the timeline + #: as a length offset in pixels from scroll origin. + start_offset: typing.Optional[float] = None + + #: Represents the ending scroll position of the timeline + #: as a length offset in pixels from scroll origin. + end_offset: typing.Optional[float] = None + + #: The element whose principal box's visibility in the + #: scrollport defined the progress of the timeline. + #: Does not exist for animations with ScrollTimeline + subject_node_id: typing.Optional[dom.BackendNodeId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["axis"] = self.axis.to_json() + if self.source_node_id is not None: + json["sourceNodeId"] = self.source_node_id.to_json() + if self.start_offset is not None: + json["startOffset"] = self.start_offset + if self.end_offset is not None: + json["endOffset"] = self.end_offset + if self.subject_node_id is not None: + json["subjectNodeId"] = self.subject_node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ViewOrScrollTimeline: + return cls( + axis=dom.ScrollOrientation.from_json(json["axis"]), + source_node_id=( + dom.BackendNodeId.from_json(json["sourceNodeId"]) + if json.get("sourceNodeId", None) is not None + else None + ), + start_offset=( + float(json["startOffset"]) + if json.get("startOffset", None) is not None + else None + ), + end_offset=( + float(json["endOffset"]) + if json.get("endOffset", None) is not None + else None + ), + subject_node_id=( + dom.BackendNodeId.from_json(json["subjectNodeId"]) + if json.get("subjectNodeId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class AnimationEffect: + """ + AnimationEffect instance + """ + + #: ``AnimationEffect``'s delay. + delay: float + + #: ``AnimationEffect``'s end delay. + end_delay: float + + #: ``AnimationEffect``'s iteration start. + iteration_start: float + + #: ``AnimationEffect``'s iterations. + iterations: float + + #: ``AnimationEffect``'s iteration duration. + #: Milliseconds for time based animations and + #: percentage [0 - 100] for scroll driven animations + #: (i.e. when viewOrScrollTimeline exists). + duration: float + + #: ``AnimationEffect``'s playback direction. + direction: str + + #: ``AnimationEffect``'s fill mode. + fill: str + + #: ``AnimationEffect``'s timing function. + easing: str + + #: ``AnimationEffect``'s target node. + backend_node_id: typing.Optional[dom.BackendNodeId] = None + + #: ``AnimationEffect``'s keyframes. + keyframes_rule: typing.Optional[KeyframesRule] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["delay"] = self.delay + json["endDelay"] = self.end_delay + json["iterationStart"] = self.iteration_start + json["iterations"] = self.iterations + json["duration"] = self.duration + json["direction"] = self.direction + json["fill"] = self.fill + json["easing"] = self.easing + if self.backend_node_id is not None: + json["backendNodeId"] = self.backend_node_id.to_json() + if self.keyframes_rule is not None: + json["keyframesRule"] = self.keyframes_rule.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AnimationEffect: + return cls( + delay=float(json["delay"]), + end_delay=float(json["endDelay"]), + iteration_start=float(json["iterationStart"]), + iterations=float(json["iterations"]), + duration=float(json["duration"]), + direction=str(json["direction"]), + fill=str(json["fill"]), + easing=str(json["easing"]), + backend_node_id=( + dom.BackendNodeId.from_json(json["backendNodeId"]) + if json.get("backendNodeId", None) is not None + else None + ), + keyframes_rule=( + KeyframesRule.from_json(json["keyframesRule"]) + if json.get("keyframesRule", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class KeyframesRule: + """ + Keyframes Rule + """ + + #: List of animation keyframes. + keyframes: typing.List[KeyframeStyle] + + #: CSS keyframed animation's name. + name: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["keyframes"] = [i.to_json() for i in self.keyframes] + if self.name is not None: + json["name"] = self.name + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> KeyframesRule: + return cls( + keyframes=[KeyframeStyle.from_json(i) for i in json["keyframes"]], + name=str(json["name"]) if json.get("name", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class KeyframeStyle: + """ + Keyframe Style + """ + + #: Keyframe's time offset. + offset: str + + #: ``AnimationEffect``'s timing function. + easing: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["offset"] = self.offset + json["easing"] = self.easing + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> KeyframeStyle: + return cls( + offset=str(json["offset"]), + easing=str(json["easing"]), + )
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables animation domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Animation.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables animation domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Animation.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_current_time(id_: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, float]: + """ + Returns the current time of an animation. + + :param id_: Id of animation. + :returns: Current time of the animation. + """ + params: T_JSON_DICT = dict() + params["id"] = id_ + cmd_dict: T_JSON_DICT = { + "method": "Animation.getCurrentTime", + "params": params, + } + json = yield cmd_dict + return float(json["currentTime"])
+ + + +
+[docs] +def get_playback_rate() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, float]: + """ + Gets the playback rate of the document timeline. + + :returns: Playback rate for animations on page. + """ + cmd_dict: T_JSON_DICT = { + "method": "Animation.getPlaybackRate", + } + json = yield cmd_dict + return float(json["playbackRate"])
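The two read-only commands above compose naturally; a sketch under the same assumptions as before (``tab`` comes from nodriver, and ``animation_id`` would normally be taken from an ``animationStarted`` event further down in this module):

    from nodriver import cdp

    async def report(tab, animation_id: str):
        await tab.send(cdp.animation.enable())
        rate = await tab.send(cdp.animation.get_playback_rate())
        now = await tab.send(cdp.animation.get_current_time(id_=animation_id))
        print(f"timeline rate {rate}, animation at {now} ms")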
+ + + +
+[docs] +def release_animations( + animations: typing.List[str], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Releases a set of animations to no longer be manipulated. + + :param animations: List of animation ids to release. + """ + params: T_JSON_DICT = dict() + params["animations"] = [i for i in animations] + cmd_dict: T_JSON_DICT = { + "method": "Animation.releaseAnimations", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def resolve_animation( + animation_id: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, runtime.RemoteObject]: + """ + Gets the remote object of the Animation. + + :param animation_id: Animation id. + :returns: Corresponding remote object. + """ + params: T_JSON_DICT = dict() + params["animationId"] = animation_id + cmd_dict: T_JSON_DICT = { + "method": "Animation.resolveAnimation", + "params": params, + } + json = yield cmd_dict + return runtime.RemoteObject.from_json(json["remoteObject"])
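``resolve_animation`` hands back a ``runtime.RemoteObject`` that can then be fed to the Runtime domain; a hedged sketch (``tab`` and ``animation_id`` assumed, ``runtime.release_object`` comes from the generated runtime module, not from here):

    from nodriver import cdp

    async def touch_animation_object(tab, animation_id: str):
        obj = await tab.send(cdp.animation.resolve_animation(animation_id=animation_id))
        print("remote Animation object:", obj.class_name, obj.object_id)
        # Release the handle once done so the renderer can garbage-collect it.
        if obj.object_id is not None:
            await tab.send(cdp.runtime.release_object(object_id=obj.object_id))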
+ + + +
+[docs] +def seek_animations( + animations: typing.List[str], current_time: float +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Seek a set of animations to a particular time within each animation. + + :param animations: List of animation ids to seek. + :param current_time: Set the current time of each animation. + """ + params: T_JSON_DICT = dict() + params["animations"] = [i for i in animations] + params["currentTime"] = current_time + cmd_dict: T_JSON_DICT = { + "method": "Animation.seekAnimations", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_paused( + animations: typing.List[str], paused: bool +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets the paused state of a set of animations. + + :param animations: Animations to set the pause state of. + :param paused: Paused state to set to. + """ + params: T_JSON_DICT = dict() + params["animations"] = [i for i in animations] + params["paused"] = paused + cmd_dict: T_JSON_DICT = { + "method": "Animation.setPaused", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_playback_rate( + playback_rate: float, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets the playback rate of the document timeline. + + :param playback_rate: Playback rate for animations on page + """ + params: T_JSON_DICT = dict() + params["playbackRate"] = playback_rate + cmd_dict: T_JSON_DICT = { + "method": "Animation.setPlaybackRate", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_timing( + animation_id: str, duration: float, delay: float +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets the timing of an animation node. + + :param animation_id: Animation id. + :param duration: Duration of the animation. + :param delay: Delay of the animation. + """ + params: T_JSON_DICT = dict() + params["animationId"] = animation_id + params["duration"] = duration + params["delay"] = delay + cmd_dict: T_JSON_DICT = { + "method": "Animation.setTiming", + "params": params, + } + json = yield cmd_dict
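Putting the control commands above together, a hedged "slow everything down" helper (``tab`` assumed, and ``ids`` would be animation ids collected from ``animationStarted`` events):

    from nodriver import cdp

    async def slow_motion(tab, ids: list[str]):
        # Pause, rewind to the start, then replay at quarter speed.
        await tab.send(cdp.animation.set_paused(animations=ids, paused=True))
        await tab.send(cdp.animation.seek_animations(animations=ids, current_time=0.0))
        await tab.send(cdp.animation.set_playback_rate(playback_rate=0.25))
        await tab.send(cdp.animation.set_paused(animations=ids, paused=False))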
+ + + +
+[docs] +@event_class("Animation.animationCanceled") +@dataclass +class AnimationCanceled: + """ + Event for when an animation has been cancelled. + """ + + #: Id of the animation that was cancelled. + id_: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AnimationCanceled: + return cls(id_=str(json["id"]))
+ + + +
+[docs] +@event_class("Animation.animationCreated") +@dataclass +class AnimationCreated: + """ + Event for each animation that has been created. + """ + + #: Id of the animation that was created. + id_: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AnimationCreated: + return cls(id_=str(json["id"]))
+ + + +
+[docs] +@event_class("Animation.animationStarted") +@dataclass +class AnimationStarted: + """ + Event for animation that has been started. + """ + + #: Animation that was started. + animation: Animation + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AnimationStarted: + return cls(animation=Animation.from_json(json["animation"]))
+ + + +
+[docs] +@event_class("Animation.animationUpdated") +@dataclass +class AnimationUpdated: + """ + Event for animation that has been updated. + """ + + #: Animation that was updated. + animation: Animation + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AnimationUpdated: + return cls(animation=Animation.from_json(json["animation"]))
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/audits.html b/docs/_build/html/_modules/nodriver/cdp/audits.html
new file mode 100644
index 0000000..3080dcd
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/audits.html
@@ -0,0 +1,2388 @@

Source code for nodriver.cdp.audits

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Audits (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import network
+from . import page
+from . import runtime
+
+
+
+[docs] +@dataclass +class AffectedCookie: + """ + Information about a cookie that is affected by an inspector issue. + """ + + #: The following three properties uniquely identify a cookie + name: str + + path: str + + domain: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["path"] = self.path + json["domain"] = self.domain + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AffectedCookie: + return cls( + name=str(json["name"]), + path=str(json["path"]), + domain=str(json["domain"]), + )
+ + + +
+[docs] +@dataclass +class AffectedRequest: + """ + Information about a request that is affected by an inspector issue. + """ + + #: The unique request id. + request_id: network.RequestId + + url: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["requestId"] = self.request_id.to_json() + if self.url is not None: + json["url"] = self.url + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AffectedRequest: + return cls( + request_id=network.RequestId.from_json(json["requestId"]), + url=str(json["url"]) if json.get("url", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class AffectedFrame: + """ + Information about the frame affected by an inspector issue. + """ + + frame_id: page.FrameId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["frameId"] = self.frame_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AffectedFrame: + return cls( + frame_id=page.FrameId.from_json(json["frameId"]), + )
+ + + +
+[docs] +class CookieExclusionReason(enum.Enum): + EXCLUDE_SAME_SITE_UNSPECIFIED_TREATED_AS_LAX = ( + "ExcludeSameSiteUnspecifiedTreatedAsLax" + ) + EXCLUDE_SAME_SITE_NONE_INSECURE = "ExcludeSameSiteNoneInsecure" + EXCLUDE_SAME_SITE_LAX = "ExcludeSameSiteLax" + EXCLUDE_SAME_SITE_STRICT = "ExcludeSameSiteStrict" + EXCLUDE_INVALID_SAME_PARTY = "ExcludeInvalidSameParty" + EXCLUDE_SAME_PARTY_CROSS_PARTY_CONTEXT = "ExcludeSamePartyCrossPartyContext" + EXCLUDE_DOMAIN_NON_ASCII = "ExcludeDomainNonASCII" + EXCLUDE_THIRD_PARTY_COOKIE_BLOCKED_IN_FIRST_PARTY_SET = ( + "ExcludeThirdPartyCookieBlockedInFirstPartySet" + ) + EXCLUDE_THIRD_PARTY_PHASEOUT = "ExcludeThirdPartyPhaseout" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CookieExclusionReason: + return cls(json)
+ + + +
+[docs] +class CookieWarningReason(enum.Enum): + WARN_SAME_SITE_UNSPECIFIED_CROSS_SITE_CONTEXT = ( + "WarnSameSiteUnspecifiedCrossSiteContext" + ) + WARN_SAME_SITE_NONE_INSECURE = "WarnSameSiteNoneInsecure" + WARN_SAME_SITE_UNSPECIFIED_LAX_ALLOW_UNSAFE = ( + "WarnSameSiteUnspecifiedLaxAllowUnsafe" + ) + WARN_SAME_SITE_STRICT_LAX_DOWNGRADE_STRICT = "WarnSameSiteStrictLaxDowngradeStrict" + WARN_SAME_SITE_STRICT_CROSS_DOWNGRADE_STRICT = ( + "WarnSameSiteStrictCrossDowngradeStrict" + ) + WARN_SAME_SITE_STRICT_CROSS_DOWNGRADE_LAX = "WarnSameSiteStrictCrossDowngradeLax" + WARN_SAME_SITE_LAX_CROSS_DOWNGRADE_STRICT = "WarnSameSiteLaxCrossDowngradeStrict" + WARN_SAME_SITE_LAX_CROSS_DOWNGRADE_LAX = "WarnSameSiteLaxCrossDowngradeLax" + WARN_ATTRIBUTE_VALUE_EXCEEDS_MAX_SIZE = "WarnAttributeValueExceedsMaxSize" + WARN_DOMAIN_NON_ASCII = "WarnDomainNonASCII" + WARN_THIRD_PARTY_PHASEOUT = "WarnThirdPartyPhaseout" + WARN_CROSS_SITE_REDIRECT_DOWNGRADE_CHANGES_INCLUSION = ( + "WarnCrossSiteRedirectDowngradeChangesInclusion" + ) + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CookieWarningReason: + return cls(json)
+ + + +
+[docs] +class CookieOperation(enum.Enum): + SET_COOKIE = "SetCookie" + READ_COOKIE = "ReadCookie" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CookieOperation: + return cls(json)
+ + + +
+[docs] +@dataclass +class CookieIssueDetails: + """ + This information is currently necessary, as the front-end has a difficult + time finding a specific cookie. With this, we can convey specific error + information without the cookie. + """ + + cookie_warning_reasons: typing.List[CookieWarningReason] + + cookie_exclusion_reasons: typing.List[CookieExclusionReason] + + #: Optionally identifies the site-for-cookies and the cookie url, which + #: may be used by the front-end as additional context. + operation: CookieOperation + + #: If AffectedCookie is not set then rawCookieLine contains the raw + #: Set-Cookie header string. This hints at a problem where the + #: cookie line is syntactically or semantically malformed in a way + #: that no valid cookie could be created. + cookie: typing.Optional[AffectedCookie] = None + + raw_cookie_line: typing.Optional[str] = None + + site_for_cookies: typing.Optional[str] = None + + cookie_url: typing.Optional[str] = None + + request: typing.Optional[AffectedRequest] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["cookieWarningReasons"] = [ + i.to_json() for i in self.cookie_warning_reasons + ] + json["cookieExclusionReasons"] = [ + i.to_json() for i in self.cookie_exclusion_reasons + ] + json["operation"] = self.operation.to_json() + if self.cookie is not None: + json["cookie"] = self.cookie.to_json() + if self.raw_cookie_line is not None: + json["rawCookieLine"] = self.raw_cookie_line + if self.site_for_cookies is not None: + json["siteForCookies"] = self.site_for_cookies + if self.cookie_url is not None: + json["cookieUrl"] = self.cookie_url + if self.request is not None: + json["request"] = self.request.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CookieIssueDetails: + return cls( + cookie_warning_reasons=[ + CookieWarningReason.from_json(i) for i in json["cookieWarningReasons"] + ], + cookie_exclusion_reasons=[ + CookieExclusionReason.from_json(i) + for i in json["cookieExclusionReasons"] + ], + operation=CookieOperation.from_json(json["operation"]), + cookie=( + AffectedCookie.from_json(json["cookie"]) + if json.get("cookie", None) is not None + else None + ), + raw_cookie_line=( + str(json["rawCookieLine"]) + if json.get("rawCookieLine", None) is not None + else None + ), + site_for_cookies=( + str(json["siteForCookies"]) + if json.get("siteForCookies", None) is not None + else None + ), + cookie_url=( + str(json["cookieUrl"]) + if json.get("cookieUrl", None) is not None + else None + ), + request=( + AffectedRequest.from_json(json["request"]) + if json.get("request", None) is not None + else None + ), + )
+ + + +
+[docs] +class MixedContentResolutionStatus(enum.Enum): + MIXED_CONTENT_BLOCKED = "MixedContentBlocked" + MIXED_CONTENT_AUTOMATICALLY_UPGRADED = "MixedContentAutomaticallyUpgraded" + MIXED_CONTENT_WARNING = "MixedContentWarning" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> MixedContentResolutionStatus: + return cls(json)
+ + + +
+[docs] +class MixedContentResourceType(enum.Enum): + ATTRIBUTION_SRC = "AttributionSrc" + AUDIO = "Audio" + BEACON = "Beacon" + CSP_REPORT = "CSPReport" + DOWNLOAD = "Download" + EVENT_SOURCE = "EventSource" + FAVICON = "Favicon" + FONT = "Font" + FORM = "Form" + FRAME = "Frame" + IMAGE = "Image" + IMPORT = "Import" + JSON = "JSON" + MANIFEST = "Manifest" + PING = "Ping" + PLUGIN_DATA = "PluginData" + PLUGIN_RESOURCE = "PluginResource" + PREFETCH = "Prefetch" + RESOURCE = "Resource" + SCRIPT = "Script" + SERVICE_WORKER = "ServiceWorker" + SHARED_WORKER = "SharedWorker" + SPECULATION_RULES = "SpeculationRules" + STYLESHEET = "Stylesheet" + TRACK = "Track" + VIDEO = "Video" + WORKER = "Worker" + XML_HTTP_REQUEST = "XMLHttpRequest" + XSLT = "XSLT" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> MixedContentResourceType: + return cls(json)
+ + + +
+[docs] +@dataclass +class MixedContentIssueDetails: + #: The way the mixed content issue is being resolved. + resolution_status: MixedContentResolutionStatus + + #: The unsafe http url causing the mixed content issue. + insecure_url: str + + #: The url responsible for the call to an unsafe url. + main_resource_url: str + + #: The type of resource causing the mixed content issue (css, js, iframe, + #: form,...). Marked as optional because it is mapped to from + #: blink::mojom::RequestContextType, which will be replaced + #: by network::mojom::RequestDestination + resource_type: typing.Optional[MixedContentResourceType] = None + + #: The mixed content request. + #: Does not always exist (e.g. for unsafe form submission urls). + request: typing.Optional[AffectedRequest] = None + + #: Optional because not every mixed content issue is necessarily linked to a frame. + frame: typing.Optional[AffectedFrame] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["resolutionStatus"] = self.resolution_status.to_json() + json["insecureURL"] = self.insecure_url + json["mainResourceURL"] = self.main_resource_url + if self.resource_type is not None: + json["resourceType"] = self.resource_type.to_json() + if self.request is not None: + json["request"] = self.request.to_json() + if self.frame is not None: + json["frame"] = self.frame.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> MixedContentIssueDetails: + return cls( + resolution_status=MixedContentResolutionStatus.from_json( + json["resolutionStatus"] + ), + insecure_url=str(json["insecureURL"]), + main_resource_url=str(json["mainResourceURL"]), + resource_type=( + MixedContentResourceType.from_json(json["resourceType"]) + if json.get("resourceType", None) is not None + else None + ), + request=( + AffectedRequest.from_json(json["request"]) + if json.get("request", None) is not None + else None + ), + frame=( + AffectedFrame.from_json(json["frame"]) + if json.get("frame", None) is not None + else None + ), + )
+ + + +
+[docs] +class BlockedByResponseReason(enum.Enum): + """ + Enum indicating the reason a response has been blocked. These reasons are + refinements of the net error BLOCKED_BY_RESPONSE. + """ + + COEP_FRAME_RESOURCE_NEEDS_COEP_HEADER = "CoepFrameResourceNeedsCoepHeader" + COOP_SANDBOXED_I_FRAME_CANNOT_NAVIGATE_TO_COOP_PAGE = ( + "CoopSandboxedIFrameCannotNavigateToCoopPage" + ) + CORP_NOT_SAME_ORIGIN = "CorpNotSameOrigin" + CORP_NOT_SAME_ORIGIN_AFTER_DEFAULTED_TO_SAME_ORIGIN_BY_COEP = ( + "CorpNotSameOriginAfterDefaultedToSameOriginByCoep" + ) + CORP_NOT_SAME_ORIGIN_AFTER_DEFAULTED_TO_SAME_ORIGIN_BY_DIP = ( + "CorpNotSameOriginAfterDefaultedToSameOriginByDip" + ) + CORP_NOT_SAME_ORIGIN_AFTER_DEFAULTED_TO_SAME_ORIGIN_BY_COEP_AND_DIP = ( + "CorpNotSameOriginAfterDefaultedToSameOriginByCoepAndDip" + ) + CORP_NOT_SAME_SITE = "CorpNotSameSite" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> BlockedByResponseReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class BlockedByResponseIssueDetails: + """ + Details for a request that has been blocked with the BLOCKED_BY_RESPONSE + code. Currently only used for COEP/COOP, but may be extended to include + some CSP errors in the future. + """ + + request: AffectedRequest + + reason: BlockedByResponseReason + + parent_frame: typing.Optional[AffectedFrame] = None + + blocked_frame: typing.Optional[AffectedFrame] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["request"] = self.request.to_json() + json["reason"] = self.reason.to_json() + if self.parent_frame is not None: + json["parentFrame"] = self.parent_frame.to_json() + if self.blocked_frame is not None: + json["blockedFrame"] = self.blocked_frame.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BlockedByResponseIssueDetails: + return cls( + request=AffectedRequest.from_json(json["request"]), + reason=BlockedByResponseReason.from_json(json["reason"]), + parent_frame=( + AffectedFrame.from_json(json["parentFrame"]) + if json.get("parentFrame", None) is not None + else None + ), + blocked_frame=( + AffectedFrame.from_json(json["blockedFrame"]) + if json.get("blockedFrame", None) is not None + else None + ), + )
+ + + +
+[docs] +class HeavyAdResolutionStatus(enum.Enum): + HEAVY_AD_BLOCKED = "HeavyAdBlocked" + HEAVY_AD_WARNING = "HeavyAdWarning" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> HeavyAdResolutionStatus: + return cls(json)
+ + + +
+[docs] +class HeavyAdReason(enum.Enum): + NETWORK_TOTAL_LIMIT = "NetworkTotalLimit" + CPU_TOTAL_LIMIT = "CpuTotalLimit" + CPU_PEAK_LIMIT = "CpuPeakLimit" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> HeavyAdReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class HeavyAdIssueDetails: + #: The resolution status, either blocking the content or warning. + resolution: HeavyAdResolutionStatus + + #: The reason the ad was blocked, total network or cpu or peak cpu. + reason: HeavyAdReason + + #: The frame that was blocked. + frame: AffectedFrame + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["resolution"] = self.resolution.to_json() + json["reason"] = self.reason.to_json() + json["frame"] = self.frame.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> HeavyAdIssueDetails: + return cls( + resolution=HeavyAdResolutionStatus.from_json(json["resolution"]), + reason=HeavyAdReason.from_json(json["reason"]), + frame=AffectedFrame.from_json(json["frame"]), + )
+ + + +
+[docs] +class ContentSecurityPolicyViolationType(enum.Enum): + K_INLINE_VIOLATION = "kInlineViolation" + K_EVAL_VIOLATION = "kEvalViolation" + K_URL_VIOLATION = "kURLViolation" + K_TRUSTED_TYPES_SINK_VIOLATION = "kTrustedTypesSinkViolation" + K_TRUSTED_TYPES_POLICY_VIOLATION = "kTrustedTypesPolicyViolation" + K_WASM_EVAL_VIOLATION = "kWasmEvalViolation" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ContentSecurityPolicyViolationType: + return cls(json)
+ + + +
+[docs] +@dataclass +class SourceCodeLocation: + url: str + + line_number: int + + column_number: int + + script_id: typing.Optional[runtime.ScriptId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["lineNumber"] = self.line_number + json["columnNumber"] = self.column_number + if self.script_id is not None: + json["scriptId"] = self.script_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SourceCodeLocation: + return cls( + url=str(json["url"]), + line_number=int(json["lineNumber"]), + column_number=int(json["columnNumber"]), + script_id=( + runtime.ScriptId.from_json(json["scriptId"]) + if json.get("scriptId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ContentSecurityPolicyIssueDetails: + #: Specific directive that is violated, causing the CSP issue. + violated_directive: str + + is_report_only: bool + + content_security_policy_violation_type: ContentSecurityPolicyViolationType + + #: The url not included in allowed sources. + blocked_url: typing.Optional[str] = None + + frame_ancestor: typing.Optional[AffectedFrame] = None + + source_code_location: typing.Optional[SourceCodeLocation] = None + + violating_node_id: typing.Optional[dom.BackendNodeId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["violatedDirective"] = self.violated_directive + json["isReportOnly"] = self.is_report_only + json["contentSecurityPolicyViolationType"] = ( + self.content_security_policy_violation_type.to_json() + ) + if self.blocked_url is not None: + json["blockedURL"] = self.blocked_url + if self.frame_ancestor is not None: + json["frameAncestor"] = self.frame_ancestor.to_json() + if self.source_code_location is not None: + json["sourceCodeLocation"] = self.source_code_location.to_json() + if self.violating_node_id is not None: + json["violatingNodeId"] = self.violating_node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ContentSecurityPolicyIssueDetails: + return cls( + violated_directive=str(json["violatedDirective"]), + is_report_only=bool(json["isReportOnly"]), + content_security_policy_violation_type=ContentSecurityPolicyViolationType.from_json( + json["contentSecurityPolicyViolationType"] + ), + blocked_url=( + str(json["blockedURL"]) + if json.get("blockedURL", None) is not None + else None + ), + frame_ancestor=( + AffectedFrame.from_json(json["frameAncestor"]) + if json.get("frameAncestor", None) is not None + else None + ), + source_code_location=( + SourceCodeLocation.from_json(json["sourceCodeLocation"]) + if json.get("sourceCodeLocation", None) is not None + else None + ), + violating_node_id=( + dom.BackendNodeId.from_json(json["violatingNodeId"]) + if json.get("violatingNodeId", None) is not None + else None + ), + )
+ + + +
+[docs] +class SharedArrayBufferIssueType(enum.Enum): + TRANSFER_ISSUE = "TransferIssue" + CREATION_ISSUE = "CreationIssue" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SharedArrayBufferIssueType: + return cls(json)
+ + + +
+[docs] +@dataclass +class SharedArrayBufferIssueDetails: + """ + Details for an issue arising from an SAB being instantiated in, or + transferred to a context that is not cross-origin isolated. + """ + + source_code_location: SourceCodeLocation + + is_warning: bool + + type_: SharedArrayBufferIssueType + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["sourceCodeLocation"] = self.source_code_location.to_json() + json["isWarning"] = self.is_warning + json["type"] = self.type_.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SharedArrayBufferIssueDetails: + return cls( + source_code_location=SourceCodeLocation.from_json( + json["sourceCodeLocation"] + ), + is_warning=bool(json["isWarning"]), + type_=SharedArrayBufferIssueType.from_json(json["type"]), + )
+ + + +
+[docs] +@dataclass +class LowTextContrastIssueDetails: + violating_node_id: dom.BackendNodeId + + violating_node_selector: str + + contrast_ratio: float + + threshold_aa: float + + threshold_aaa: float + + font_size: str + + font_weight: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["violatingNodeId"] = self.violating_node_id.to_json() + json["violatingNodeSelector"] = self.violating_node_selector + json["contrastRatio"] = self.contrast_ratio + json["thresholdAA"] = self.threshold_aa + json["thresholdAAA"] = self.threshold_aaa + json["fontSize"] = self.font_size + json["fontWeight"] = self.font_weight + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LowTextContrastIssueDetails: + return cls( + violating_node_id=dom.BackendNodeId.from_json(json["violatingNodeId"]), + violating_node_selector=str(json["violatingNodeSelector"]), + contrast_ratio=float(json["contrastRatio"]), + threshold_aa=float(json["thresholdAA"]), + threshold_aaa=float(json["thresholdAAA"]), + font_size=str(json["fontSize"]), + font_weight=str(json["fontWeight"]), + )
+ + + +
+[docs] +@dataclass +class CorsIssueDetails: + """ + Details for a CORS related issue, e.g. a warning or error related to + CORS RFC1918 enforcement. + """ + + cors_error_status: network.CorsErrorStatus + + is_warning: bool + + request: AffectedRequest + + location: typing.Optional[SourceCodeLocation] = None + + initiator_origin: typing.Optional[str] = None + + resource_ip_address_space: typing.Optional[network.IPAddressSpace] = None + + client_security_state: typing.Optional[network.ClientSecurityState] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["corsErrorStatus"] = self.cors_error_status.to_json() + json["isWarning"] = self.is_warning + json["request"] = self.request.to_json() + if self.location is not None: + json["location"] = self.location.to_json() + if self.initiator_origin is not None: + json["initiatorOrigin"] = self.initiator_origin + if self.resource_ip_address_space is not None: + json["resourceIPAddressSpace"] = self.resource_ip_address_space.to_json() + if self.client_security_state is not None: + json["clientSecurityState"] = self.client_security_state.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CorsIssueDetails: + return cls( + cors_error_status=network.CorsErrorStatus.from_json( + json["corsErrorStatus"] + ), + is_warning=bool(json["isWarning"]), + request=AffectedRequest.from_json(json["request"]), + location=( + SourceCodeLocation.from_json(json["location"]) + if json.get("location", None) is not None + else None + ), + initiator_origin=( + str(json["initiatorOrigin"]) + if json.get("initiatorOrigin", None) is not None + else None + ), + resource_ip_address_space=( + network.IPAddressSpace.from_json(json["resourceIPAddressSpace"]) + if json.get("resourceIPAddressSpace", None) is not None + else None + ), + client_security_state=( + network.ClientSecurityState.from_json(json["clientSecurityState"]) + if json.get("clientSecurityState", None) is not None + else None + ), + )
+ + + +
+[docs] +class AttributionReportingIssueType(enum.Enum): + PERMISSION_POLICY_DISABLED = "PermissionPolicyDisabled" + UNTRUSTWORTHY_REPORTING_ORIGIN = "UntrustworthyReportingOrigin" + INSECURE_CONTEXT = "InsecureContext" + INVALID_HEADER = "InvalidHeader" + INVALID_REGISTER_TRIGGER_HEADER = "InvalidRegisterTriggerHeader" + SOURCE_AND_TRIGGER_HEADERS = "SourceAndTriggerHeaders" + SOURCE_IGNORED = "SourceIgnored" + TRIGGER_IGNORED = "TriggerIgnored" + OS_SOURCE_IGNORED = "OsSourceIgnored" + OS_TRIGGER_IGNORED = "OsTriggerIgnored" + INVALID_REGISTER_OS_SOURCE_HEADER = "InvalidRegisterOsSourceHeader" + INVALID_REGISTER_OS_TRIGGER_HEADER = "InvalidRegisterOsTriggerHeader" + WEB_AND_OS_HEADERS = "WebAndOsHeaders" + NO_WEB_OR_OS_SUPPORT = "NoWebOrOsSupport" + NAVIGATION_REGISTRATION_WITHOUT_TRANSIENT_USER_ACTIVATION = ( + "NavigationRegistrationWithoutTransientUserActivation" + ) + INVALID_INFO_HEADER = "InvalidInfoHeader" + NO_REGISTER_SOURCE_HEADER = "NoRegisterSourceHeader" + NO_REGISTER_TRIGGER_HEADER = "NoRegisterTriggerHeader" + NO_REGISTER_OS_SOURCE_HEADER = "NoRegisterOsSourceHeader" + NO_REGISTER_OS_TRIGGER_HEADER = "NoRegisterOsTriggerHeader" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AttributionReportingIssueType: + return cls(json)
+ + + +
+[docs] +class SharedDictionaryError(enum.Enum): + USE_ERROR_CROSS_ORIGIN_NO_CORS_REQUEST = "UseErrorCrossOriginNoCorsRequest" + USE_ERROR_DICTIONARY_LOAD_FAILURE = "UseErrorDictionaryLoadFailure" + USE_ERROR_MATCHING_DICTIONARY_NOT_USED = "UseErrorMatchingDictionaryNotUsed" + USE_ERROR_UNEXPECTED_CONTENT_DICTIONARY_HEADER = ( + "UseErrorUnexpectedContentDictionaryHeader" + ) + WRITE_ERROR_COSS_ORIGIN_NO_CORS_REQUEST = "WriteErrorCossOriginNoCorsRequest" + WRITE_ERROR_DISALLOWED_BY_SETTINGS = "WriteErrorDisallowedBySettings" + WRITE_ERROR_EXPIRED_RESPONSE = "WriteErrorExpiredResponse" + WRITE_ERROR_FEATURE_DISABLED = "WriteErrorFeatureDisabled" + WRITE_ERROR_INSUFFICIENT_RESOURCES = "WriteErrorInsufficientResources" + WRITE_ERROR_INVALID_MATCH_FIELD = "WriteErrorInvalidMatchField" + WRITE_ERROR_INVALID_STRUCTURED_HEADER = "WriteErrorInvalidStructuredHeader" + WRITE_ERROR_NAVIGATION_REQUEST = "WriteErrorNavigationRequest" + WRITE_ERROR_NO_MATCH_FIELD = "WriteErrorNoMatchField" + WRITE_ERROR_NON_LIST_MATCH_DEST_FIELD = "WriteErrorNonListMatchDestField" + WRITE_ERROR_NON_SECURE_CONTEXT = "WriteErrorNonSecureContext" + WRITE_ERROR_NON_STRING_ID_FIELD = "WriteErrorNonStringIdField" + WRITE_ERROR_NON_STRING_IN_MATCH_DEST_LIST = "WriteErrorNonStringInMatchDestList" + WRITE_ERROR_NON_STRING_MATCH_FIELD = "WriteErrorNonStringMatchField" + WRITE_ERROR_NON_TOKEN_TYPE_FIELD = "WriteErrorNonTokenTypeField" + WRITE_ERROR_REQUEST_ABORTED = "WriteErrorRequestAborted" + WRITE_ERROR_SHUTTING_DOWN = "WriteErrorShuttingDown" + WRITE_ERROR_TOO_LONG_ID_FIELD = "WriteErrorTooLongIdField" + WRITE_ERROR_UNSUPPORTED_TYPE = "WriteErrorUnsupportedType" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SharedDictionaryError: + return cls(json)
+ + + +
+[docs] +@dataclass +class AttributionReportingIssueDetails: + """ + Details for issues around "Attribution Reporting API" usage. + Explainer: https://github.com/WICG/attribution-reporting-api + """ + + violation_type: AttributionReportingIssueType + + request: typing.Optional[AffectedRequest] = None + + violating_node_id: typing.Optional[dom.BackendNodeId] = None + + invalid_parameter: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["violationType"] = self.violation_type.to_json() + if self.request is not None: + json["request"] = self.request.to_json() + if self.violating_node_id is not None: + json["violatingNodeId"] = self.violating_node_id.to_json() + if self.invalid_parameter is not None: + json["invalidParameter"] = self.invalid_parameter + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingIssueDetails: + return cls( + violation_type=AttributionReportingIssueType.from_json( + json["violationType"] + ), + request=( + AffectedRequest.from_json(json["request"]) + if json.get("request", None) is not None + else None + ), + violating_node_id=( + dom.BackendNodeId.from_json(json["violatingNodeId"]) + if json.get("violatingNodeId", None) is not None + else None + ), + invalid_parameter=( + str(json["invalidParameter"]) + if json.get("invalidParameter", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class QuirksModeIssueDetails: + """ + Details for issues about documents in Quirks Mode + or Limited Quirks Mode that affects page layouting. + """ + + #: If false, it means the document's mode is "quirks" + #: instead of "limited-quirks". + is_limited_quirks_mode: bool + + document_node_id: dom.BackendNodeId + + url: str + + frame_id: page.FrameId + + loader_id: network.LoaderId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["isLimitedQuirksMode"] = self.is_limited_quirks_mode + json["documentNodeId"] = self.document_node_id.to_json() + json["url"] = self.url + json["frameId"] = self.frame_id.to_json() + json["loaderId"] = self.loader_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> QuirksModeIssueDetails: + return cls( + is_limited_quirks_mode=bool(json["isLimitedQuirksMode"]), + document_node_id=dom.BackendNodeId.from_json(json["documentNodeId"]), + url=str(json["url"]), + frame_id=page.FrameId.from_json(json["frameId"]), + loader_id=network.LoaderId.from_json(json["loaderId"]), + )
+ + + + + + + +
+[docs] +@dataclass +class SharedDictionaryIssueDetails: + shared_dictionary_error: SharedDictionaryError + + request: AffectedRequest + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["sharedDictionaryError"] = self.shared_dictionary_error.to_json() + json["request"] = self.request.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SharedDictionaryIssueDetails: + return cls( + shared_dictionary_error=SharedDictionaryError.from_json( + json["sharedDictionaryError"] + ), + request=AffectedRequest.from_json(json["request"]), + )
+ + + +
+[docs] +class GenericIssueErrorType(enum.Enum): + CROSS_ORIGIN_PORTAL_POST_MESSAGE_ERROR = "CrossOriginPortalPostMessageError" + FORM_LABEL_FOR_NAME_ERROR = "FormLabelForNameError" + FORM_DUPLICATE_ID_FOR_INPUT_ERROR = "FormDuplicateIdForInputError" + FORM_INPUT_WITH_NO_LABEL_ERROR = "FormInputWithNoLabelError" + FORM_AUTOCOMPLETE_ATTRIBUTE_EMPTY_ERROR = "FormAutocompleteAttributeEmptyError" + FORM_EMPTY_ID_AND_NAME_ATTRIBUTES_FOR_INPUT_ERROR = ( + "FormEmptyIdAndNameAttributesForInputError" + ) + FORM_ARIA_LABELLED_BY_TO_NON_EXISTING_ID = "FormAriaLabelledByToNonExistingId" + FORM_INPUT_ASSIGNED_AUTOCOMPLETE_VALUE_TO_ID_OR_NAME_ATTRIBUTE_ERROR = ( + "FormInputAssignedAutocompleteValueToIdOrNameAttributeError" + ) + FORM_LABEL_HAS_NEITHER_FOR_NOR_NESTED_INPUT = "FormLabelHasNeitherForNorNestedInput" + FORM_LABEL_FOR_MATCHES_NON_EXISTING_ID_ERROR = ( + "FormLabelForMatchesNonExistingIdError" + ) + FORM_INPUT_HAS_WRONG_BUT_WELL_INTENDED_AUTOCOMPLETE_VALUE_ERROR = ( + "FormInputHasWrongButWellIntendedAutocompleteValueError" + ) + RESPONSE_WAS_BLOCKED_BY_ORB = "ResponseWasBlockedByORB" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> GenericIssueErrorType: + return cls(json)
+ + + +
+[docs] +@dataclass +class GenericIssueDetails: + """ + Depending on the concrete errorType, different properties are set. + """ + + #: Issues with the same errorType are aggregated in the frontend. + error_type: GenericIssueErrorType + + frame_id: typing.Optional[page.FrameId] = None + + violating_node_id: typing.Optional[dom.BackendNodeId] = None + + violating_node_attribute: typing.Optional[str] = None + + request: typing.Optional[AffectedRequest] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["errorType"] = self.error_type.to_json() + if self.frame_id is not None: + json["frameId"] = self.frame_id.to_json() + if self.violating_node_id is not None: + json["violatingNodeId"] = self.violating_node_id.to_json() + if self.violating_node_attribute is not None: + json["violatingNodeAttribute"] = self.violating_node_attribute + if self.request is not None: + json["request"] = self.request.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> GenericIssueDetails: + return cls( + error_type=GenericIssueErrorType.from_json(json["errorType"]), + frame_id=( + page.FrameId.from_json(json["frameId"]) + if json.get("frameId", None) is not None + else None + ), + violating_node_id=( + dom.BackendNodeId.from_json(json["violatingNodeId"]) + if json.get("violatingNodeId", None) is not None + else None + ), + violating_node_attribute=( + str(json["violatingNodeAttribute"]) + if json.get("violatingNodeAttribute", None) is not None + else None + ), + request=( + AffectedRequest.from_json(json["request"]) + if json.get("request", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class DeprecationIssueDetails: + """ + This issue tracks information needed to print a deprecation message. + https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/core/frame/third_party/blink/renderer/core/frame/deprecation/README.md + """ + + source_code_location: SourceCodeLocation + + #: One of the deprecation names from third_party/blink/renderer/core/frame/deprecation/deprecation.json5 + type_: str + + affected_frame: typing.Optional[AffectedFrame] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["sourceCodeLocation"] = self.source_code_location.to_json() + json["type"] = self.type_ + if self.affected_frame is not None: + json["affectedFrame"] = self.affected_frame.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DeprecationIssueDetails: + return cls( + source_code_location=SourceCodeLocation.from_json( + json["sourceCodeLocation"] + ), + type_=str(json["type"]), + affected_frame=( + AffectedFrame.from_json(json["affectedFrame"]) + if json.get("affectedFrame", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class BounceTrackingIssueDetails: + """ + This issue warns about sites in the redirect chain of a finished navigation + that may be flagged as trackers and have their state cleared if they don't + receive a user interaction. Note that in this context 'site' means eTLD+1. + For example, if the URL ``https://example.test:80/bounce`` was in the + redirect chain, the site reported would be ``example.test``. + """ + + tracking_sites: typing.List[str] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["trackingSites"] = [i for i in self.tracking_sites] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BounceTrackingIssueDetails: + return cls( + tracking_sites=[str(i) for i in json["trackingSites"]], + )
+ + + +
+[docs] +@dataclass +class CookieDeprecationMetadataIssueDetails: + """ + This issue warns about third-party sites that are accessing cookies on the + current page, and have been permitted due to having a global metadata grant. + Note that in this context 'site' means eTLD+1. For example, if the URL + ``https://example.test:80/web_page`` was accessing cookies, the site reported + would be ``example.test``. + """ + + allowed_sites: typing.List[str] + + opt_out_percentage: float + + is_opt_out_top_level: bool + + operation: CookieOperation + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["allowedSites"] = [i for i in self.allowed_sites] + json["optOutPercentage"] = self.opt_out_percentage + json["isOptOutTopLevel"] = self.is_opt_out_top_level + json["operation"] = self.operation.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CookieDeprecationMetadataIssueDetails: + return cls( + allowed_sites=[str(i) for i in json["allowedSites"]], + opt_out_percentage=float(json["optOutPercentage"]), + is_opt_out_top_level=bool(json["isOptOutTopLevel"]), + operation=CookieOperation.from_json(json["operation"]), + )
+ + + +
+[docs] +class ClientHintIssueReason(enum.Enum): + META_TAG_ALLOW_LIST_INVALID_ORIGIN = "MetaTagAllowListInvalidOrigin" + META_TAG_MODIFIED_HTML = "MetaTagModifiedHTML" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ClientHintIssueReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class FederatedAuthRequestIssueDetails: + federated_auth_request_issue_reason: FederatedAuthRequestIssueReason + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["federatedAuthRequestIssueReason"] = ( + self.federated_auth_request_issue_reason.to_json() + ) + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FederatedAuthRequestIssueDetails: + return cls( + federated_auth_request_issue_reason=FederatedAuthRequestIssueReason.from_json( + json["federatedAuthRequestIssueReason"] + ), + )
+ + + +
+[docs] +class FederatedAuthRequestIssueReason(enum.Enum): + """ + Represents the failure reason when a federated authentication reason fails. + Should be updated alongside RequestIdTokenStatus in + third_party/blink/public/mojom/devtools/inspector_issue.mojom to include + all cases except for success. + """ + + SHOULD_EMBARGO = "ShouldEmbargo" + TOO_MANY_REQUESTS = "TooManyRequests" + WELL_KNOWN_HTTP_NOT_FOUND = "WellKnownHttpNotFound" + WELL_KNOWN_NO_RESPONSE = "WellKnownNoResponse" + WELL_KNOWN_INVALID_RESPONSE = "WellKnownInvalidResponse" + WELL_KNOWN_LIST_EMPTY = "WellKnownListEmpty" + WELL_KNOWN_INVALID_CONTENT_TYPE = "WellKnownInvalidContentType" + CONFIG_NOT_IN_WELL_KNOWN = "ConfigNotInWellKnown" + WELL_KNOWN_TOO_BIG = "WellKnownTooBig" + CONFIG_HTTP_NOT_FOUND = "ConfigHttpNotFound" + CONFIG_NO_RESPONSE = "ConfigNoResponse" + CONFIG_INVALID_RESPONSE = "ConfigInvalidResponse" + CONFIG_INVALID_CONTENT_TYPE = "ConfigInvalidContentType" + CLIENT_METADATA_HTTP_NOT_FOUND = "ClientMetadataHttpNotFound" + CLIENT_METADATA_NO_RESPONSE = "ClientMetadataNoResponse" + CLIENT_METADATA_INVALID_RESPONSE = "ClientMetadataInvalidResponse" + CLIENT_METADATA_INVALID_CONTENT_TYPE = "ClientMetadataInvalidContentType" + IDP_NOT_POTENTIALLY_TRUSTWORTHY = "IdpNotPotentiallyTrustworthy" + DISABLED_IN_SETTINGS = "DisabledInSettings" + DISABLED_IN_FLAGS = "DisabledInFlags" + ERROR_FETCHING_SIGNIN = "ErrorFetchingSignin" + INVALID_SIGNIN_RESPONSE = "InvalidSigninResponse" + ACCOUNTS_HTTP_NOT_FOUND = "AccountsHttpNotFound" + ACCOUNTS_NO_RESPONSE = "AccountsNoResponse" + ACCOUNTS_INVALID_RESPONSE = "AccountsInvalidResponse" + ACCOUNTS_LIST_EMPTY = "AccountsListEmpty" + ACCOUNTS_INVALID_CONTENT_TYPE = "AccountsInvalidContentType" + ID_TOKEN_HTTP_NOT_FOUND = "IdTokenHttpNotFound" + ID_TOKEN_NO_RESPONSE = "IdTokenNoResponse" + ID_TOKEN_INVALID_RESPONSE = "IdTokenInvalidResponse" + ID_TOKEN_IDP_ERROR_RESPONSE = "IdTokenIdpErrorResponse" + ID_TOKEN_CROSS_SITE_IDP_ERROR_RESPONSE = "IdTokenCrossSiteIdpErrorResponse" + ID_TOKEN_INVALID_REQUEST = "IdTokenInvalidRequest" + ID_TOKEN_INVALID_CONTENT_TYPE = "IdTokenInvalidContentType" + ERROR_ID_TOKEN = "ErrorIdToken" + CANCELED = "Canceled" + RP_PAGE_NOT_VISIBLE = "RpPageNotVisible" + SILENT_MEDIATION_FAILURE = "SilentMediationFailure" + THIRD_PARTY_COOKIES_BLOCKED = "ThirdPartyCookiesBlocked" + NOT_SIGNED_IN_WITH_IDP = "NotSignedInWithIdp" + MISSING_TRANSIENT_USER_ACTIVATION = "MissingTransientUserActivation" + REPLACED_BY_BUTTON_MODE = "ReplacedByButtonMode" + INVALID_FIELDS_SPECIFIED = "InvalidFieldsSpecified" + RELYING_PARTY_ORIGIN_IS_OPAQUE = "RelyingPartyOriginIsOpaque" + TYPE_NOT_MATCHING = "TypeNotMatching" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> FederatedAuthRequestIssueReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class FederatedAuthUserInfoRequestIssueDetails: + federated_auth_user_info_request_issue_reason: ( + FederatedAuthUserInfoRequestIssueReason + ) + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["federatedAuthUserInfoRequestIssueReason"] = ( + self.federated_auth_user_info_request_issue_reason.to_json() + ) + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FederatedAuthUserInfoRequestIssueDetails: + return cls( + federated_auth_user_info_request_issue_reason=FederatedAuthUserInfoRequestIssueReason.from_json( + json["federatedAuthUserInfoRequestIssueReason"] + ), + )
+ + + +
+[docs] +class FederatedAuthUserInfoRequestIssueReason(enum.Enum): + """ + Represents the failure reason when a getUserInfo() call fails. + Should be updated alongside FederatedAuthUserInfoRequestResult in + third_party/blink/public/mojom/devtools/inspector_issue.mojom. + """ + + NOT_SAME_ORIGIN = "NotSameOrigin" + NOT_IFRAME = "NotIframe" + NOT_POTENTIALLY_TRUSTWORTHY = "NotPotentiallyTrustworthy" + NO_API_PERMISSION = "NoApiPermission" + NOT_SIGNED_IN_WITH_IDP = "NotSignedInWithIdp" + NO_ACCOUNT_SHARING_PERMISSION = "NoAccountSharingPermission" + INVALID_CONFIG_OR_WELL_KNOWN = "InvalidConfigOrWellKnown" + INVALID_ACCOUNTS_RESPONSE = "InvalidAccountsResponse" + NO_RETURNING_USER_FROM_FETCHED_ACCOUNTS = "NoReturningUserFromFetchedAccounts" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> FederatedAuthUserInfoRequestIssueReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class ClientHintIssueDetails: + """ + This issue tracks client hints related issues. It's used to deprecate old + features, encourage the use of new ones, and provide general guidance. + """ + + source_code_location: SourceCodeLocation + + client_hint_issue_reason: ClientHintIssueReason + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["sourceCodeLocation"] = self.source_code_location.to_json() + json["clientHintIssueReason"] = self.client_hint_issue_reason.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ClientHintIssueDetails: + return cls( + source_code_location=SourceCodeLocation.from_json( + json["sourceCodeLocation"] + ), + client_hint_issue_reason=ClientHintIssueReason.from_json( + json["clientHintIssueReason"] + ), + )
+ + + +
+[docs] +@dataclass +class FailedRequestInfo: + #: The URL that failed to load. + url: str + + #: The failure message for the failed request. + failure_message: str + + request_id: typing.Optional[network.RequestId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["failureMessage"] = self.failure_message + if self.request_id is not None: + json["requestId"] = self.request_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FailedRequestInfo: + return cls( + url=str(json["url"]), + failure_message=str(json["failureMessage"]), + request_id=( + network.RequestId.from_json(json["requestId"]) + if json.get("requestId", None) is not None + else None + ), + )
+ + + +
+[docs] +class StyleSheetLoadingIssueReason(enum.Enum): + LATE_IMPORT_RULE = "LateImportRule" + REQUEST_FAILED = "RequestFailed" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> StyleSheetLoadingIssueReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class StylesheetLoadingIssueDetails: + """ + This issue warns when a referenced stylesheet couldn't be loaded. + """ + + #: Source code position that referenced the failing stylesheet. + source_code_location: SourceCodeLocation + + #: Reason why the stylesheet couldn't be loaded. + style_sheet_loading_issue_reason: StyleSheetLoadingIssueReason + + #: Contains additional info when the failure was due to a request. + failed_request_info: typing.Optional[FailedRequestInfo] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["sourceCodeLocation"] = self.source_code_location.to_json() + json["styleSheetLoadingIssueReason"] = ( + self.style_sheet_loading_issue_reason.to_json() + ) + if self.failed_request_info is not None: + json["failedRequestInfo"] = self.failed_request_info.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StylesheetLoadingIssueDetails: + return cls( + source_code_location=SourceCodeLocation.from_json( + json["sourceCodeLocation"] + ), + style_sheet_loading_issue_reason=StyleSheetLoadingIssueReason.from_json( + json["styleSheetLoadingIssueReason"] + ), + failed_request_info=( + FailedRequestInfo.from_json(json["failedRequestInfo"]) + if json.get("failedRequestInfo", None) is not None + else None + ), + )
+ + + +
+[docs] +class PropertyRuleIssueReason(enum.Enum): + INVALID_SYNTAX = "InvalidSyntax" + INVALID_INITIAL_VALUE = "InvalidInitialValue" + INVALID_INHERITS = "InvalidInherits" + INVALID_NAME = "InvalidName" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PropertyRuleIssueReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class PropertyRuleIssueDetails: + """ + This issue warns about errors in property rules that lead to property + registrations being ignored. + """ + + #: Source code position of the property rule. + source_code_location: SourceCodeLocation + + #: Reason why the property rule was discarded. + property_rule_issue_reason: PropertyRuleIssueReason + + #: The value of the property rule property that failed to parse + property_value: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["sourceCodeLocation"] = self.source_code_location.to_json() + json["propertyRuleIssueReason"] = self.property_rule_issue_reason.to_json() + if self.property_value is not None: + json["propertyValue"] = self.property_value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PropertyRuleIssueDetails: + return cls( + source_code_location=SourceCodeLocation.from_json( + json["sourceCodeLocation"] + ), + property_rule_issue_reason=PropertyRuleIssueReason.from_json( + json["propertyRuleIssueReason"] + ), + property_value=( + str(json["propertyValue"]) + if json.get("propertyValue", None) is not None + else None + ), + )
+ + + +
+[docs] +class InspectorIssueCode(enum.Enum): + """ + A unique identifier for the type of issue. Each type may use one of the + optional fields in InspectorIssueDetails to convey more specific + information about the kind of issue. + """ + + COOKIE_ISSUE = "CookieIssue" + MIXED_CONTENT_ISSUE = "MixedContentIssue" + BLOCKED_BY_RESPONSE_ISSUE = "BlockedByResponseIssue" + HEAVY_AD_ISSUE = "HeavyAdIssue" + CONTENT_SECURITY_POLICY_ISSUE = "ContentSecurityPolicyIssue" + SHARED_ARRAY_BUFFER_ISSUE = "SharedArrayBufferIssue" + LOW_TEXT_CONTRAST_ISSUE = "LowTextContrastIssue" + CORS_ISSUE = "CorsIssue" + ATTRIBUTION_REPORTING_ISSUE = "AttributionReportingIssue" + QUIRKS_MODE_ISSUE = "QuirksModeIssue" + NAVIGATOR_USER_AGENT_ISSUE = "NavigatorUserAgentIssue" + GENERIC_ISSUE = "GenericIssue" + DEPRECATION_ISSUE = "DeprecationIssue" + CLIENT_HINT_ISSUE = "ClientHintIssue" + FEDERATED_AUTH_REQUEST_ISSUE = "FederatedAuthRequestIssue" + BOUNCE_TRACKING_ISSUE = "BounceTrackingIssue" + COOKIE_DEPRECATION_METADATA_ISSUE = "CookieDeprecationMetadataIssue" + STYLESHEET_LOADING_ISSUE = "StylesheetLoadingIssue" + FEDERATED_AUTH_USER_INFO_REQUEST_ISSUE = "FederatedAuthUserInfoRequestIssue" + PROPERTY_RULE_ISSUE = "PropertyRuleIssue" + SHARED_DICTIONARY_ISSUE = "SharedDictionaryIssue" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> InspectorIssueCode: + return cls(json)
+ + + +
+[docs] +@dataclass +class InspectorIssueDetails: + """ + This struct holds a list of optional fields with additional information + specific to the kind of issue. When adding a new issue code, please also + add a new optional field to this type. + """ + + cookie_issue_details: typing.Optional[CookieIssueDetails] = None + + mixed_content_issue_details: typing.Optional[MixedContentIssueDetails] = None + + blocked_by_response_issue_details: typing.Optional[ + BlockedByResponseIssueDetails + ] = None + + heavy_ad_issue_details: typing.Optional[HeavyAdIssueDetails] = None + + content_security_policy_issue_details: typing.Optional[ + ContentSecurityPolicyIssueDetails + ] = None + + shared_array_buffer_issue_details: typing.Optional[ + SharedArrayBufferIssueDetails + ] = None + + low_text_contrast_issue_details: typing.Optional[LowTextContrastIssueDetails] = None + + cors_issue_details: typing.Optional[CorsIssueDetails] = None + + attribution_reporting_issue_details: typing.Optional[ + AttributionReportingIssueDetails + ] = None + + quirks_mode_issue_details: typing.Optional[QuirksModeIssueDetails] = None + + navigator_user_agent_issue_details: typing.Optional[ + NavigatorUserAgentIssueDetails + ] = None + + generic_issue_details: typing.Optional[GenericIssueDetails] = None + + deprecation_issue_details: typing.Optional[DeprecationIssueDetails] = None + + client_hint_issue_details: typing.Optional[ClientHintIssueDetails] = None + + federated_auth_request_issue_details: typing.Optional[ + FederatedAuthRequestIssueDetails + ] = None + + bounce_tracking_issue_details: typing.Optional[BounceTrackingIssueDetails] = None + + cookie_deprecation_metadata_issue_details: typing.Optional[ + CookieDeprecationMetadataIssueDetails + ] = None + + stylesheet_loading_issue_details: typing.Optional[StylesheetLoadingIssueDetails] = ( + None + ) + + property_rule_issue_details: typing.Optional[PropertyRuleIssueDetails] = None + + federated_auth_user_info_request_issue_details: typing.Optional[ + FederatedAuthUserInfoRequestIssueDetails + ] = None + + shared_dictionary_issue_details: typing.Optional[SharedDictionaryIssueDetails] = ( + None + ) + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.cookie_issue_details is not None: + json["cookieIssueDetails"] = self.cookie_issue_details.to_json() + if self.mixed_content_issue_details is not None: + json["mixedContentIssueDetails"] = ( + self.mixed_content_issue_details.to_json() + ) + if self.blocked_by_response_issue_details is not None: + json["blockedByResponseIssueDetails"] = ( + self.blocked_by_response_issue_details.to_json() + ) + if self.heavy_ad_issue_details is not None: + json["heavyAdIssueDetails"] = self.heavy_ad_issue_details.to_json() + if self.content_security_policy_issue_details is not None: + json["contentSecurityPolicyIssueDetails"] = ( + self.content_security_policy_issue_details.to_json() + ) + if self.shared_array_buffer_issue_details is not None: + json["sharedArrayBufferIssueDetails"] = ( + self.shared_array_buffer_issue_details.to_json() + ) + if self.low_text_contrast_issue_details is not None: + json["lowTextContrastIssueDetails"] = ( + self.low_text_contrast_issue_details.to_json() + ) + if self.cors_issue_details is not None: + json["corsIssueDetails"] = self.cors_issue_details.to_json() + if self.attribution_reporting_issue_details is not None: + json["attributionReportingIssueDetails"] = ( + self.attribution_reporting_issue_details.to_json() + ) + if self.quirks_mode_issue_details is not None: + 
json["quirksModeIssueDetails"] = self.quirks_mode_issue_details.to_json() + if self.navigator_user_agent_issue_details is not None: + json["navigatorUserAgentIssueDetails"] = ( + self.navigator_user_agent_issue_details.to_json() + ) + if self.generic_issue_details is not None: + json["genericIssueDetails"] = self.generic_issue_details.to_json() + if self.deprecation_issue_details is not None: + json["deprecationIssueDetails"] = self.deprecation_issue_details.to_json() + if self.client_hint_issue_details is not None: + json["clientHintIssueDetails"] = self.client_hint_issue_details.to_json() + if self.federated_auth_request_issue_details is not None: + json["federatedAuthRequestIssueDetails"] = ( + self.federated_auth_request_issue_details.to_json() + ) + if self.bounce_tracking_issue_details is not None: + json["bounceTrackingIssueDetails"] = ( + self.bounce_tracking_issue_details.to_json() + ) + if self.cookie_deprecation_metadata_issue_details is not None: + json["cookieDeprecationMetadataIssueDetails"] = ( + self.cookie_deprecation_metadata_issue_details.to_json() + ) + if self.stylesheet_loading_issue_details is not None: + json["stylesheetLoadingIssueDetails"] = ( + self.stylesheet_loading_issue_details.to_json() + ) + if self.property_rule_issue_details is not None: + json["propertyRuleIssueDetails"] = ( + self.property_rule_issue_details.to_json() + ) + if self.federated_auth_user_info_request_issue_details is not None: + json["federatedAuthUserInfoRequestIssueDetails"] = ( + self.federated_auth_user_info_request_issue_details.to_json() + ) + if self.shared_dictionary_issue_details is not None: + json["sharedDictionaryIssueDetails"] = ( + self.shared_dictionary_issue_details.to_json() + ) + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InspectorIssueDetails: + return cls( + cookie_issue_details=( + CookieIssueDetails.from_json(json["cookieIssueDetails"]) + if json.get("cookieIssueDetails", None) is not None + else None + ), + mixed_content_issue_details=( + MixedContentIssueDetails.from_json(json["mixedContentIssueDetails"]) + if json.get("mixedContentIssueDetails", None) is not None + else None + ), + blocked_by_response_issue_details=( + BlockedByResponseIssueDetails.from_json( + json["blockedByResponseIssueDetails"] + ) + if json.get("blockedByResponseIssueDetails", None) is not None + else None + ), + heavy_ad_issue_details=( + HeavyAdIssueDetails.from_json(json["heavyAdIssueDetails"]) + if json.get("heavyAdIssueDetails", None) is not None + else None + ), + content_security_policy_issue_details=( + ContentSecurityPolicyIssueDetails.from_json( + json["contentSecurityPolicyIssueDetails"] + ) + if json.get("contentSecurityPolicyIssueDetails", None) is not None + else None + ), + shared_array_buffer_issue_details=( + SharedArrayBufferIssueDetails.from_json( + json["sharedArrayBufferIssueDetails"] + ) + if json.get("sharedArrayBufferIssueDetails", None) is not None + else None + ), + low_text_contrast_issue_details=( + LowTextContrastIssueDetails.from_json( + json["lowTextContrastIssueDetails"] + ) + if json.get("lowTextContrastIssueDetails", None) is not None + else None + ), + cors_issue_details=( + CorsIssueDetails.from_json(json["corsIssueDetails"]) + if json.get("corsIssueDetails", None) is not None + else None + ), + attribution_reporting_issue_details=( + AttributionReportingIssueDetails.from_json( + json["attributionReportingIssueDetails"] + ) + if json.get("attributionReportingIssueDetails", None) is not None + else None + ), + 
quirks_mode_issue_details=( + QuirksModeIssueDetails.from_json(json["quirksModeIssueDetails"]) + if json.get("quirksModeIssueDetails", None) is not None + else None + ), + navigator_user_agent_issue_details=( + NavigatorUserAgentIssueDetails.from_json( + json["navigatorUserAgentIssueDetails"] + ) + if json.get("navigatorUserAgentIssueDetails", None) is not None + else None + ), + generic_issue_details=( + GenericIssueDetails.from_json(json["genericIssueDetails"]) + if json.get("genericIssueDetails", None) is not None + else None + ), + deprecation_issue_details=( + DeprecationIssueDetails.from_json(json["deprecationIssueDetails"]) + if json.get("deprecationIssueDetails", None) is not None + else None + ), + client_hint_issue_details=( + ClientHintIssueDetails.from_json(json["clientHintIssueDetails"]) + if json.get("clientHintIssueDetails", None) is not None + else None + ), + federated_auth_request_issue_details=( + FederatedAuthRequestIssueDetails.from_json( + json["federatedAuthRequestIssueDetails"] + ) + if json.get("federatedAuthRequestIssueDetails", None) is not None + else None + ), + bounce_tracking_issue_details=( + BounceTrackingIssueDetails.from_json(json["bounceTrackingIssueDetails"]) + if json.get("bounceTrackingIssueDetails", None) is not None + else None + ), + cookie_deprecation_metadata_issue_details=( + CookieDeprecationMetadataIssueDetails.from_json( + json["cookieDeprecationMetadataIssueDetails"] + ) + if json.get("cookieDeprecationMetadataIssueDetails", None) is not None + else None + ), + stylesheet_loading_issue_details=( + StylesheetLoadingIssueDetails.from_json( + json["stylesheetLoadingIssueDetails"] + ) + if json.get("stylesheetLoadingIssueDetails", None) is not None + else None + ), + property_rule_issue_details=( + PropertyRuleIssueDetails.from_json(json["propertyRuleIssueDetails"]) + if json.get("propertyRuleIssueDetails", None) is not None + else None + ), + federated_auth_user_info_request_issue_details=( + FederatedAuthUserInfoRequestIssueDetails.from_json( + json["federatedAuthUserInfoRequestIssueDetails"] + ) + if json.get("federatedAuthUserInfoRequestIssueDetails", None) + is not None + else None + ), + shared_dictionary_issue_details=( + SharedDictionaryIssueDetails.from_json( + json["sharedDictionaryIssueDetails"] + ) + if json.get("sharedDictionaryIssueDetails", None) is not None + else None + ), + )
+ + + +
+[docs] +class IssueId(str): + """ + A unique id for a DevTools inspector issue. Allows other entities (e.g. + exceptions, CDP message, console messages, etc.) to reference an issue. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> IssueId: + return cls(json) + + def __repr__(self): + return "IssueId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class InspectorIssue: + """ + An inspector issue reported from the back-end. + """ + + code: InspectorIssueCode + + details: InspectorIssueDetails + + #: A unique id for this issue. May be omitted if no other entity (e.g. + #: exception, CDP message, etc.) is referencing this issue. + issue_id: typing.Optional[IssueId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["code"] = self.code.to_json() + json["details"] = self.details.to_json() + if self.issue_id is not None: + json["issueId"] = self.issue_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InspectorIssue: + return cls( + code=InspectorIssueCode.from_json(json["code"]), + details=InspectorIssueDetails.from_json(json["details"]), + issue_id=( + IssueId.from_json(json["issueId"]) + if json.get("issueId", None) is not None + else None + ), + )
+ + + +
+[docs] +def get_encoded_response( + request_id: network.RequestId, + encoding: str, + quality: typing.Optional[float] = None, + size_only: typing.Optional[bool] = None, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[typing.Optional[str], int, int] +]: + """ + Returns the response body and size if it were re-encoded with the specified settings. Only + applies to images. + + :param request_id: Identifier of the network request to get content for. + :param encoding: The encoding to use. + :param quality: *(Optional)* The quality of the encoding (0-1). (defaults to 1) + :param size_only: *(Optional)* Whether to only return the size information (defaults to false). + :returns: A tuple with the following items: + + 0. **body** - *(Optional)* The encoded body as a base64 string. Omitted if sizeOnly is true. (Encoded as a base64 string when passed over JSON) + 1. **originalSize** - Size before re-encoding. + 2. **encodedSize** - Size after re-encoding. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + params["encoding"] = encoding + if quality is not None: + params["quality"] = quality + if size_only is not None: + params["sizeOnly"] = size_only + cmd_dict: T_JSON_DICT = { + "method": "Audits.getEncodedResponse", + "params": params, + } + json = yield cmd_dict + return ( + str(json["body"]) if json.get("body", None) is not None else None, + int(json["originalSize"]), + int(json["encodedSize"]), + )
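Each command in this module is a plain generator: it yields exactly one request dict (method plus params), expects the raw CDP result to be sent back in, and delivers its parsed return value through StopIteration. A minimal sketch of driving get_encoded_response by hand; send_over_cdp and request_id are placeholders for your transport and a previously captured request id (nodriver's Connection/Tab normally does this round-trip for you):

def run_cdp_command(cmd, send_over_cdp):
    # send_over_cdp: hypothetical callable that performs the websocket round-trip.
    request = next(cmd)                    # e.g. {"method": "Audits.getEncodedResponse", "params": {...}}
    try:
        cmd.send(send_over_cdp(request))   # feed the raw JSON result back into the generator
    except StopIteration as result:
        return result.value                # the parsed return value

# request_id: a network.RequestId captured earlier from a network event (assumed).
body, original_size, encoded_size = run_cdp_command(
    get_encoded_response(request_id, encoding="webp", quality=0.8, size_only=False),
    send_over_cdp,
)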
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables issues domain, prevents further issues from being reported to the client. + """ + cmd_dict: T_JSON_DICT = { + "method": "Audits.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables issues domain, sends the issues collected so far to the client by means of the + ``issueAdded`` event. + """ + cmd_dict: T_JSON_DICT = { + "method": "Audits.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def check_contrast( + report_aaa: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Runs the contrast check for the target page. Found issues are reported + using Audits.issueAdded event. + + :param report_aaa: *(Optional)* Whether to report WCAG AAA level issues. Default is false. + """ + params: T_JSON_DICT = dict() + if report_aaa is not None: + params["reportAAA"] = report_aaa + cmd_dict: T_JSON_DICT = { + "method": "Audits.checkContrast", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def check_forms_issues() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[GenericIssueDetails]] +): + """ + Runs the form issues check for the target page. Found issues are reported + using Audits.issueAdded event. + + :returns: The form issues found on the page, as a list of GenericIssueDetails. + """ + cmd_dict: T_JSON_DICT = { + "method": "Audits.checkFormsIssues", + } + json = yield cmd_dict + return [GenericIssueDetails.from_json(i) for i in json["formIssues"]]
+ + + +
+[docs] +@event_class("Audits.issueAdded") +@dataclass +class IssueAdded: + issue: InspectorIssue + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> IssueAdded: + return cls(issue=InspectorIssue.from_json(json["issue"]))
diff --git a/docs/_build/html/_modules/nodriver/cdp/autofill.html b/docs/_build/html/_modules/nodriver/cdp/autofill.html
new file mode 100644
index 0000000..7cbd788
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/autofill.html
@@ -0,0 +1,627 @@
+ nodriver.cdp.autofill - nodriver documentation
Source code for nodriver.cdp.autofill

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Autofill (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import page
+
+
+
+[docs] +@dataclass +class CreditCard: + #: 16-digit credit card number. + number: str + + #: Name of the credit card owner. + name: str + + #: 2-digit expiry month. + expiry_month: str + + #: 4-digit expiry year. + expiry_year: str + + #: 3-digit card verification code. + cvc: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["number"] = self.number + json["name"] = self.name + json["expiryMonth"] = self.expiry_month + json["expiryYear"] = self.expiry_year + json["cvc"] = self.cvc + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CreditCard: + return cls( + number=str(json["number"]), + name=str(json["name"]), + expiry_month=str(json["expiryMonth"]), + expiry_year=str(json["expiryYear"]), + cvc=str(json["cvc"]), + )
+ + + +
+[docs] +@dataclass +class AddressField: + #: address field name, for example GIVEN_NAME. + name: str + + #: address field value, for example Jon Doe. + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AddressField: + return cls( + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@dataclass +class AddressFields: + """ + A list of address fields. + """ + + fields: typing.List[AddressField] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["fields"] = [i.to_json() for i in self.fields] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AddressFields: + return cls( + fields=[AddressField.from_json(i) for i in json["fields"]], + )
+ + + +
+[docs] +@dataclass +class Address: + #: fields and values defining an address. + fields: typing.List[AddressField] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["fields"] = [i.to_json() for i in self.fields] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Address: + return cls( + fields=[AddressField.from_json(i) for i in json["fields"]], + )
+ + + +
+[docs] +@dataclass +class AddressUI: + """ + Defines how an address can be displayed like in chrome://settings/addresses. + Address UI is a two-dimensional array, each inner array is an "address information line", and when rendered in a UI surface should be displayed as such. + The following address UI for instance: + [[{name: "GIVEN_NAME", value: "Jon"}, {name: "FAMILY_NAME", value: "Doe"}], [{name: "CITY", value: "Munich"}, {name: "ZIP", value: "81456"}]] + should allow the receiver to render: + Jon Doe + Munich 81456 + """ + + #: A two-dimensional array containing the representation of values from an address profile. + address_fields: typing.List[AddressFields] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["addressFields"] = [i.to_json() for i in self.address_fields] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AddressUI: + return cls( + address_fields=[AddressFields.from_json(i) for i in json["addressFields"]], + )
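Since AddressUI is just rows of AddressFields, flattening it for display takes only a few lines; a small sketch:

def format_address_ui(ui: AddressUI) -> str:
    # Each AddressFields entry represents one "address information line".
    return "\n".join(
        " ".join(field.value for field in line.fields)
        for line in ui.address_fields
    )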
+ + + +
+[docs] +class FillingStrategy(enum.Enum): + """ + Specifies whether a field was filled using the html autocomplete attribute or autofill heuristics. + """ + + AUTOCOMPLETE_ATTRIBUTE = "autocompleteAttribute" + AUTOFILL_INFERRED = "autofillInferred" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> FillingStrategy: + return cls(json)
+ + + +
+[docs] +@dataclass +class FilledField: + #: The type of the field, e.g text, password etc. + html_type: str + + #: the html id + id_: str + + #: the html name + name: str + + #: the field value + value: str + + #: The actual field type, e.g FAMILY_NAME + autofill_type: str + + #: The filling strategy + filling_strategy: FillingStrategy + + #: The frame the field belongs to + frame_id: page.FrameId + + #: The form field's DOM node + field_id: dom.BackendNodeId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["htmlType"] = self.html_type + json["id"] = self.id_ + json["name"] = self.name + json["value"] = self.value + json["autofillType"] = self.autofill_type + json["fillingStrategy"] = self.filling_strategy.to_json() + json["frameId"] = self.frame_id.to_json() + json["fieldId"] = self.field_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FilledField: + return cls( + html_type=str(json["htmlType"]), + id_=str(json["id"]), + name=str(json["name"]), + value=str(json["value"]), + autofill_type=str(json["autofillType"]), + filling_strategy=FillingStrategy.from_json(json["fillingStrategy"]), + frame_id=page.FrameId.from_json(json["frameId"]), + field_id=dom.BackendNodeId.from_json(json["fieldId"]), + )
+ + + +
+[docs] +def trigger( + field_id: dom.BackendNodeId, + card: CreditCard, + frame_id: typing.Optional[page.FrameId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Trigger autofill on a form identified by the fieldId. + If the field and related form cannot be autofilled, returns an error. + + :param field_id: Identifies a field that serves as an anchor for autofill. + :param frame_id: *(Optional)* Identifies the frame that field belongs to. + :param card: Credit card information to fill out the form. Credit card data is not saved. + """ + params: T_JSON_DICT = dict() + params["fieldId"] = field_id.to_json() + if frame_id is not None: + params["frameId"] = frame_id.to_json() + params["card"] = card.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Autofill.trigger", + "params": params, + } + json = yield cmd_dict
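A hedged sketch of calling trigger from nodriver: the CreditCard payload comes from this module, while the BackendNodeId of the anchor field is looked up through the generated DOM domain (dom.get_document / dom.query_selector / dom.describe_node); tab.send(), the selector and the test card data are assumptions for illustration:

# Inside a coroutine with a nodriver `tab`.
doc = await tab.send(cdp.dom.get_document())
node_id = await tab.send(cdp.dom.query_selector(doc.node_id, "input#card-number"))
node = await tab.send(cdp.dom.describe_node(node_id=node_id))

card = cdp.autofill.CreditCard(
    number="4111111111111111",  # test number; the browser does not store it
    name="Jon Doe",
    expiry_month="12",
    expiry_year="2030",
    cvc="123",
)
await tab.send(cdp.autofill.trigger(field_id=node.backend_node_id, card=card))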
+ + + +
+[docs] +def set_addresses( + addresses: typing.List[Address], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set addresses so that developers can verify their forms implementation. + + :param addresses: + """ + params: T_JSON_DICT = dict() + params["addresses"] = [i.to_json() for i in addresses] + cmd_dict: T_JSON_DICT = { + "method": "Autofill.setAddresses", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables autofill domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Autofill.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables autofill domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Autofill.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Autofill.addressFormFilled") +@dataclass +class AddressFormFilled: + """ + Emitted when an address form is filled. + """ + + #: Information about the fields that were filled + filled_fields: typing.List[FilledField] + #: An UI representation of the address used to fill the form. + #: Consists of a 2D array where each child represents an address/profile line. + address_ui: AddressUI + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AddressFormFilled: + return cls( + filled_fields=[FilledField.from_json(i) for i in json["filledFields"]], + address_ui=AddressUI.from_json(json["addressUi"]), + )
diff --git a/docs/_build/html/_modules/nodriver/cdp/background_service.html b/docs/_build/html/_modules/nodriver/cdp/background_service.html
new file mode 100644
index 0000000..d5b33fc
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/background_service.html
@@ -0,0 +1,553 @@
+ nodriver.cdp.background_service - nodriver documentation
Source code for nodriver.cdp.background_service

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: BackgroundService (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import network
+from . import service_worker
+
+
+
+[docs] +class ServiceName(enum.Enum): + """ + The Background Service that will be associated with the commands/events. + Every Background Service operates independently, but they share the same + API. + """ + + BACKGROUND_FETCH = "backgroundFetch" + BACKGROUND_SYNC = "backgroundSync" + PUSH_MESSAGING = "pushMessaging" + NOTIFICATIONS = "notifications" + PAYMENT_HANDLER = "paymentHandler" + PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ServiceName: + return cls(json)
+ + + +
+[docs] +@dataclass +class EventMetadata: + """ + A key-value pair for additional event information to pass along. + """ + + key: str + + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["key"] = self.key + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> EventMetadata: + return cls( + key=str(json["key"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@dataclass +class BackgroundServiceEvent: + #: Timestamp of the event (in seconds). + timestamp: network.TimeSinceEpoch + + #: The origin this event belongs to. + origin: str + + #: The Service Worker ID that initiated the event. + service_worker_registration_id: service_worker.RegistrationID + + #: The Background Service this event belongs to. + service: ServiceName + + #: A description of the event. + event_name: str + + #: An identifier that groups related events together. + instance_id: str + + #: A list of event-specific information. + event_metadata: typing.List[EventMetadata] + + #: Storage key this event belongs to. + storage_key: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["timestamp"] = self.timestamp.to_json() + json["origin"] = self.origin + json["serviceWorkerRegistrationId"] = ( + self.service_worker_registration_id.to_json() + ) + json["service"] = self.service.to_json() + json["eventName"] = self.event_name + json["instanceId"] = self.instance_id + json["eventMetadata"] = [i.to_json() for i in self.event_metadata] + json["storageKey"] = self.storage_key + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEvent: + return cls( + timestamp=network.TimeSinceEpoch.from_json(json["timestamp"]), + origin=str(json["origin"]), + service_worker_registration_id=service_worker.RegistrationID.from_json( + json["serviceWorkerRegistrationId"] + ), + service=ServiceName.from_json(json["service"]), + event_name=str(json["eventName"]), + instance_id=str(json["instanceId"]), + event_metadata=[EventMetadata.from_json(i) for i in json["eventMetadata"]], + storage_key=str(json["storageKey"]), + )
+ + + +
+[docs] +def start_observing( + service: ServiceName, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables event updates for the service. + + :param service: + """ + params: T_JSON_DICT = dict() + params["service"] = service.to_json() + cmd_dict: T_JSON_DICT = { + "method": "BackgroundService.startObserving", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_observing( + service: ServiceName, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables event updates for the service. + + :param service: + """ + params: T_JSON_DICT = dict() + params["service"] = service.to_json() + cmd_dict: T_JSON_DICT = { + "method": "BackgroundService.stopObserving", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_recording( + should_record: bool, service: ServiceName +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set the recording state for the service. + + :param should_record: + :param service: + """ + params: T_JSON_DICT = dict() + params["shouldRecord"] = should_record + params["service"] = service.to_json() + cmd_dict: T_JSON_DICT = { + "method": "BackgroundService.setRecording", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_events( + service: ServiceName, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears all stored data for the service. + + :param service: + """ + params: T_JSON_DICT = dict() + params["service"] = service.to_json() + cmd_dict: T_JSON_DICT = { + "method": "BackgroundService.clearEvents", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("BackgroundService.recordingStateChanged") +@dataclass +class RecordingStateChanged: + """ + Called when the recording state for the service has been updated. + """ + + is_recording: bool + service: ServiceName + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RecordingStateChanged: + return cls( + is_recording=bool(json["isRecording"]), + service=ServiceName.from_json(json["service"]), + )
+ + + +
+[docs] +@event_class("BackgroundService.backgroundServiceEventReceived") +@dataclass +class BackgroundServiceEventReceived: + """ + Called with all existing backgroundServiceEvents when enabled, and all new + events afterwards if enabled and recording. + """ + + background_service_event: BackgroundServiceEvent + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEventReceived: + return cls( + background_service_event=BackgroundServiceEvent.from_json( + json["backgroundServiceEvent"] + ) + )
diff --git a/docs/_build/html/_modules/nodriver/cdp/browser.html b/docs/_build/html/_modules/nodriver/cdp/browser.html
new file mode 100644
index 0000000..96e474c
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/browser.html
@@ -0,0 +1,1145 @@
+ nodriver.cdp.browser - nodriver documentation
Source code for nodriver.cdp.browser

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Browser
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import page
+from . import target
+
+
+
+[docs] +class BrowserContextID(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> BrowserContextID: + return cls(json) + + def __repr__(self): + return "BrowserContextID({})".format(super().__repr__())
+ + + +
+[docs] +class WindowID(int): + def to_json(self) -> int: + return self + + @classmethod + def from_json(cls, json: int) -> WindowID: + return cls(json) + + def __repr__(self): + return "WindowID({})".format(super().__repr__())
+ + + +
+[docs] +class WindowState(enum.Enum): + """ + The state of the browser window. + """ + + NORMAL = "normal" + MINIMIZED = "minimized" + MAXIMIZED = "maximized" + FULLSCREEN = "fullscreen" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> WindowState: + return cls(json)
+ + + +
+[docs] +@dataclass +class Bounds: + """ + Browser window bounds information + """ + + #: The offset from the left edge of the screen to the window in pixels. + left: typing.Optional[int] = None + + #: The offset from the top edge of the screen to the window in pixels. + top: typing.Optional[int] = None + + #: The window width in pixels. + width: typing.Optional[int] = None + + #: The window height in pixels. + height: typing.Optional[int] = None + + #: The window state. Default to normal. + window_state: typing.Optional[WindowState] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.left is not None: + json["left"] = self.left + if self.top is not None: + json["top"] = self.top + if self.width is not None: + json["width"] = self.width + if self.height is not None: + json["height"] = self.height + if self.window_state is not None: + json["windowState"] = self.window_state.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Bounds: + return cls( + left=int(json["left"]) if json.get("left", None) is not None else None, + top=int(json["top"]) if json.get("top", None) is not None else None, + width=int(json["width"]) if json.get("width", None) is not None else None, + height=( + int(json["height"]) if json.get("height", None) is not None else None + ), + window_state=( + WindowState.from_json(json["windowState"]) + if json.get("windowState", None) is not None + else None + ), + )
+ + + +
+[docs] +class PermissionType(enum.Enum): + ACCESSIBILITY_EVENTS = "accessibilityEvents" + AUDIO_CAPTURE = "audioCapture" + BACKGROUND_SYNC = "backgroundSync" + BACKGROUND_FETCH = "backgroundFetch" + CAPTURED_SURFACE_CONTROL = "capturedSurfaceControl" + CLIPBOARD_READ_WRITE = "clipboardReadWrite" + CLIPBOARD_SANITIZED_WRITE = "clipboardSanitizedWrite" + DISPLAY_CAPTURE = "displayCapture" + DURABLE_STORAGE = "durableStorage" + FLASH = "flash" + GEOLOCATION = "geolocation" + IDLE_DETECTION = "idleDetection" + LOCAL_FONTS = "localFonts" + MIDI = "midi" + MIDI_SYSEX = "midiSysex" + NFC = "nfc" + NOTIFICATIONS = "notifications" + PAYMENT_HANDLER = "paymentHandler" + PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync" + PROTECTED_MEDIA_IDENTIFIER = "protectedMediaIdentifier" + SENSORS = "sensors" + STORAGE_ACCESS = "storageAccess" + SPEAKER_SELECTION = "speakerSelection" + TOP_LEVEL_STORAGE_ACCESS = "topLevelStorageAccess" + VIDEO_CAPTURE = "videoCapture" + VIDEO_CAPTURE_PAN_TILT_ZOOM = "videoCapturePanTiltZoom" + WAKE_LOCK_SCREEN = "wakeLockScreen" + WAKE_LOCK_SYSTEM = "wakeLockSystem" + WINDOW_MANAGEMENT = "windowManagement" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PermissionType: + return cls(json)
+ + + +
+[docs] +class PermissionSetting(enum.Enum): + GRANTED = "granted" + DENIED = "denied" + PROMPT = "prompt" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PermissionSetting: + return cls(json)
+ + + +
+[docs] +@dataclass +class PermissionDescriptor: + """ + Definition of PermissionDescriptor defined in the Permissions API: + https://w3c.github.io/permissions/#dom-permissiondescriptor. + """ + + #: Name of permission. + #: See https://cs.chromium.org/chromium/src/third_party/blink/renderer/modules/permissions/permission_descriptor.idl for valid permission names. + name: str + + #: For "midi" permission, may also specify sysex control. + sysex: typing.Optional[bool] = None + + #: For "push" permission, may specify userVisibleOnly. + #: Note that userVisibleOnly = true is the only currently supported type. + user_visible_only: typing.Optional[bool] = None + + #: For "clipboard" permission, may specify allowWithoutSanitization. + allow_without_sanitization: typing.Optional[bool] = None + + #: For "fullscreen" permission, must specify allowWithoutGesture:true. + allow_without_gesture: typing.Optional[bool] = None + + #: For "camera" permission, may specify panTiltZoom. + pan_tilt_zoom: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + if self.sysex is not None: + json["sysex"] = self.sysex + if self.user_visible_only is not None: + json["userVisibleOnly"] = self.user_visible_only + if self.allow_without_sanitization is not None: + json["allowWithoutSanitization"] = self.allow_without_sanitization + if self.allow_without_gesture is not None: + json["allowWithoutGesture"] = self.allow_without_gesture + if self.pan_tilt_zoom is not None: + json["panTiltZoom"] = self.pan_tilt_zoom + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PermissionDescriptor: + return cls( + name=str(json["name"]), + sysex=bool(json["sysex"]) if json.get("sysex", None) is not None else None, + user_visible_only=( + bool(json["userVisibleOnly"]) + if json.get("userVisibleOnly", None) is not None + else None + ), + allow_without_sanitization=( + bool(json["allowWithoutSanitization"]) + if json.get("allowWithoutSanitization", None) is not None + else None + ), + allow_without_gesture=( + bool(json["allowWithoutGesture"]) + if json.get("allowWithoutGesture", None) is not None + else None + ), + pan_tilt_zoom=( + bool(json["panTiltZoom"]) + if json.get("panTiltZoom", None) is not None + else None + ), + )
+ + + +
+[docs] +class BrowserCommandId(enum.Enum): + """ + Browser command ids used by executeBrowserCommand. + """ + + OPEN_TAB_SEARCH = "openTabSearch" + CLOSE_TAB_SEARCH = "closeTabSearch" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> BrowserCommandId: + return cls(json)
+ + + +
+[docs] +@dataclass +class Bucket: + """ + Chrome histogram bucket. + """ + + #: Minimum value (inclusive). + low: int + + #: Maximum value (exclusive). + high: int + + #: Number of samples. + count: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["low"] = self.low + json["high"] = self.high + json["count"] = self.count + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Bucket: + return cls( + low=int(json["low"]), + high=int(json["high"]), + count=int(json["count"]), + )
+ + + +
+[docs] +@dataclass +class Histogram: + """ + Chrome histogram. + """ + + #: Name. + name: str + + #: Sum of sample values. + sum_: int + + #: Total number of samples. + count: int + + #: Buckets. + buckets: typing.List[Bucket] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["sum"] = self.sum_ + json["count"] = self.count + json["buckets"] = [i.to_json() for i in self.buckets] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Histogram: + return cls( + name=str(json["name"]), + sum_=int(json["sum"]), + count=int(json["count"]), + buckets=[Bucket.from_json(i) for i in json["buckets"]], + )
+ + + +
+[docs] +def set_permission( + permission: PermissionDescriptor, + setting: PermissionSetting, + origin: typing.Optional[str] = None, + browser_context_id: typing.Optional[BrowserContextID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set permission settings for given origin. + + **EXPERIMENTAL** + + :param permission: Descriptor of permission to override. + :param setting: Setting of the permission. + :param origin: *(Optional)* Origin the permission applies to, all origins if not specified. + :param browser_context_id: *(Optional)* Context to override. When omitted, default browser context is used. + """ + params: T_JSON_DICT = dict() + params["permission"] = permission.to_json() + params["setting"] = setting.to_json() + if origin is not None: + params["origin"] = origin + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Browser.setPermission", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def grant_permissions( + permissions: typing.List[PermissionType], + origin: typing.Optional[str] = None, + browser_context_id: typing.Optional[BrowserContextID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Grant specific permissions to the given origin and reject all others. + + **EXPERIMENTAL** + + :param permissions: + :param origin: *(Optional)* Origin the permission applies to, all origins if not specified. + :param browser_context_id: *(Optional)* BrowserContext to override permissions. When omitted, default browser context is used. + """ + params: T_JSON_DICT = dict() + params["permissions"] = [i.to_json() for i in permissions] + if origin is not None: + params["origin"] = origin + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Browser.grantPermissions", + "params": params, + } + json = yield cmd_dict
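A hedged sketch of pre-granting permissions so the page never shows a prompt (nodriver's tab.send() and the origin are assumptions):

# Inside a coroutine with a nodriver `tab`.
await tab.send(cdp.browser.grant_permissions(
    permissions=[
        cdp.browser.PermissionType.GEOLOCATION,
        cdp.browser.PermissionType.NOTIFICATIONS,
    ],
    origin="https://example.com",  # omit to apply to all origins
))
# Undo all overrides later:
await tab.send(cdp.browser.reset_permissions())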
+ + + +
+[docs] +def reset_permissions( + browser_context_id: typing.Optional[BrowserContextID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Reset all permission management for all origins. + + :param browser_context_id: *(Optional)* BrowserContext to reset permissions. When omitted, default browser context is used. + """ + params: T_JSON_DICT = dict() + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Browser.resetPermissions", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_download_behavior( + behavior: str, + browser_context_id: typing.Optional[BrowserContextID] = None, + download_path: typing.Optional[str] = None, + events_enabled: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set the behavior when downloading a file. + + **EXPERIMENTAL** + + :param behavior: Whether to allow all or deny all download requests, or use default Chrome behavior if available (otherwise deny). ``allowAndName`` allows download and names files according to their download guids. + :param browser_context_id: *(Optional)* BrowserContext to set download behavior. When omitted, default browser context is used. + :param download_path: *(Optional)* The default path to save downloaded files to. This is required if behavior is set to 'allow' or 'allowAndName'. + :param events_enabled: *(Optional)* Whether to emit download events (defaults to false). + """ + params: T_JSON_DICT = dict() + params["behavior"] = behavior + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + if download_path is not None: + params["downloadPath"] = download_path + if events_enabled is not None: + params["eventsEnabled"] = events_enabled + cmd_dict: T_JSON_DICT = { + "method": "Browser.setDownloadBehavior", + "params": params, + } + json = yield cmd_dict
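A hedged sketch of allowing downloads into a fixed directory and following progress through the download events defined at the end of this module (tab.send()/tab.add_handler() assumed; the path is an example):

# Inside a coroutine with a nodriver `tab`.
await tab.send(cdp.browser.set_download_behavior(
    behavior="allow",
    download_path="/tmp/downloads",
    events_enabled=True,  # emit downloadWillBegin / downloadProgress
))

def on_progress(ev: cdp.browser.DownloadProgress):
    if ev.state == "completed":
        print("finished download", ev.guid)

tab.add_handler(cdp.browser.DownloadProgress, on_progress)  # assumed helper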
+ + + +
+[docs] +def cancel_download( + guid: str, browser_context_id: typing.Optional[BrowserContextID] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Cancel a download if in progress + + **EXPERIMENTAL** + + :param guid: Global unique identifier of the download. + :param browser_context_id: *(Optional)* BrowserContext to perform the action in. When omitted, default browser context is used. + """ + params: T_JSON_DICT = dict() + params["guid"] = guid + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Browser.cancelDownload", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def close() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Close browser gracefully. + """ + cmd_dict: T_JSON_DICT = { + "method": "Browser.close", + } + json = yield cmd_dict
+ + + +
+[docs] +def crash() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Crashes browser on the main thread. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Browser.crash", + } + json = yield cmd_dict
+ + + +
+[docs] +def crash_gpu_process() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Crashes GPU process. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Browser.crashGpuProcess", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_version() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[str, str, str, str, str]] +): + """ + Returns version information. + + :returns: A tuple with the following items: + + 0. **protocolVersion** - Protocol version. + 1. **product** - Product name. + 2. **revision** - Product revision. + 3. **userAgent** - User-Agent. + 4. **jsVersion** - V8 version. + """ + cmd_dict: T_JSON_DICT = { + "method": "Browser.getVersion", + } + json = yield cmd_dict + return ( + str(json["protocolVersion"]), + str(json["product"]), + str(json["revision"]), + str(json["userAgent"]), + str(json["jsVersion"]), + )
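Because get_version returns a plain 5-tuple, the result unpacks directly; a short sketch assuming nodriver's tab.send():

protocol, product, revision, user_agent, js_version = await tab.send(
    cdp.browser.get_version()
)
print(f"{product} (rev {revision}) speaks CDP {protocol} on V8 {js_version}")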
+ + + +
+[docs] +def get_browser_command_line() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]] +): + """ + Returns the command line switches for the browser process if, and only if + --enable-automation is on the commandline. + + **EXPERIMENTAL** + + :returns: Commandline parameters + """ + cmd_dict: T_JSON_DICT = { + "method": "Browser.getBrowserCommandLine", + } + json = yield cmd_dict + return [str(i) for i in json["arguments"]]
+ + + +
+[docs] +def get_histograms( + query: typing.Optional[str] = None, delta: typing.Optional[bool] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Histogram]]: + """ + Get Chrome histograms. + + **EXPERIMENTAL** + + :param query: *(Optional)* Requested substring in name. Only histograms which have query as a substring in their name are extracted. An empty or absent query returns all histograms. + :param delta: *(Optional)* If true, retrieve delta since last delta call. + :returns: Histograms. + """ + params: T_JSON_DICT = dict() + if query is not None: + params["query"] = query + if delta is not None: + params["delta"] = delta + cmd_dict: T_JSON_DICT = { + "method": "Browser.getHistograms", + "params": params, + } + json = yield cmd_dict + return [Histogram.from_json(i) for i in json["histograms"]]
+ + + +
+[docs] +def get_histogram( + name: str, delta: typing.Optional[bool] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, Histogram]: + """ + Get a Chrome histogram by name. + + **EXPERIMENTAL** + + :param name: Requested histogram name. + :param delta: *(Optional)* If true, retrieve delta since last delta call. + :returns: Histogram. + """ + params: T_JSON_DICT = dict() + params["name"] = name + if delta is not None: + params["delta"] = delta + cmd_dict: T_JSON_DICT = { + "method": "Browser.getHistogram", + "params": params, + } + json = yield cmd_dict + return Histogram.from_json(json["histogram"])
+ + + +
+[docs] +def get_window_bounds( + window_id: WindowID, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, Bounds]: + """ + Get position and size of the browser window. + + **EXPERIMENTAL** + + :param window_id: Browser window id. + :returns: Bounds information of the window. When window state is 'minimized', the restored window position and size are returned. + """ + params: T_JSON_DICT = dict() + params["windowId"] = window_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Browser.getWindowBounds", + "params": params, + } + json = yield cmd_dict + return Bounds.from_json(json["bounds"])
+ + + +
+[docs] +def get_window_for_target( + target_id: typing.Optional[target.TargetID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[WindowID, Bounds]]: + """ + Get the browser window that contains the devtools target. + + **EXPERIMENTAL** + + :param target_id: *(Optional)* Devtools agent host id. If called as a part of the session, associated targetId is used. + :returns: A tuple with the following items: + + 0. **windowId** - Browser window id. + 1. **bounds** - Bounds information of the window. When window state is 'minimized', the restored window position and size are returned. + """ + params: T_JSON_DICT = dict() + if target_id is not None: + params["targetId"] = target_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Browser.getWindowForTarget", + "params": params, + } + json = yield cmd_dict + return (WindowID.from_json(json["windowId"]), Bounds.from_json(json["bounds"]))
+ + + +
+[docs] +def set_window_bounds( + window_id: WindowID, bounds: Bounds +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set position and/or size of the browser window. + + **EXPERIMENTAL** + + :param window_id: Browser window id. + :param bounds: New window bounds. The 'minimized', 'maximized' and 'fullscreen' states cannot be combined with 'left', 'top', 'width' or 'height'. Leaves unspecified fields unchanged. + """ + params: T_JSON_DICT = dict() + params["windowId"] = window_id.to_json() + params["bounds"] = bounds.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Browser.setWindowBounds", + "params": params, + } + json = yield cmd_dict
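A hedged sketch of maximizing the window that hosts the current target, combining get_window_for_target, Bounds and WindowState from above (tab.send() assumed). Note the docstring caveat: window states such as 'maximized' cannot be combined with explicit left/top/width/height.

# Inside a coroutine with a nodriver `tab`.
window_id, _bounds = await tab.send(cdp.browser.get_window_for_target())
await tab.send(cdp.browser.set_window_bounds(
    window_id,
    cdp.browser.Bounds(window_state=cdp.browser.WindowState.MAXIMIZED),
))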
+ + + +
+[docs] +def set_dock_tile( + badge_label: typing.Optional[str] = None, image: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set dock tile details, platform-specific. + + **EXPERIMENTAL** + + :param badge_label: *(Optional)* + :param image: *(Optional)* Png encoded image. (Encoded as a base64 string when passed over JSON) + """ + params: T_JSON_DICT = dict() + if badge_label is not None: + params["badgeLabel"] = badge_label + if image is not None: + params["image"] = image + cmd_dict: T_JSON_DICT = { + "method": "Browser.setDockTile", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def execute_browser_command( + command_id: BrowserCommandId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Invoke custom browser commands used by telemetry. + + **EXPERIMENTAL** + + :param command_id: + """ + params: T_JSON_DICT = dict() + params["commandId"] = command_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Browser.executeBrowserCommand", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def add_privacy_sandbox_enrollment_override( + url: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Allows a site to use privacy sandbox features that require enrollment + without the site actually being enrolled. Only supported on page targets. + + :param url: + """ + params: T_JSON_DICT = dict() + params["url"] = url + cmd_dict: T_JSON_DICT = { + "method": "Browser.addPrivacySandboxEnrollmentOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Browser.downloadWillBegin") +@dataclass +class DownloadWillBegin: + """ + **EXPERIMENTAL** + + Fired when page is about to start a download. + """ + + #: Id of the frame that caused the download to begin. + frame_id: page.FrameId + #: Global unique identifier of the download. + guid: str + #: URL of the resource being downloaded. + url: str + #: Suggested file name of the resource (the actual name of the file saved on disk may differ). + suggested_filename: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DownloadWillBegin: + return cls( + frame_id=page.FrameId.from_json(json["frameId"]), + guid=str(json["guid"]), + url=str(json["url"]), + suggested_filename=str(json["suggestedFilename"]), + )
+ + + +
+[docs] +@event_class("Browser.downloadProgress") +@dataclass +class DownloadProgress: + """ + **EXPERIMENTAL** + + Fired when download makes progress. Last call has ``done`` == true. + """ + + #: Global unique identifier of the download. + guid: str + #: Total expected bytes to download. + total_bytes: float + #: Total bytes received. + received_bytes: float + #: Download status. + state: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DownloadProgress: + return cls( + guid=str(json["guid"]), + total_bytes=float(json["totalBytes"]), + received_bytes=float(json["receivedBytes"]), + state=str(json["state"]), + )
diff --git a/docs/_build/html/_modules/nodriver/cdp/cache_storage.html b/docs/_build/html/_modules/nodriver/cdp/cache_storage.html
new file mode 100644
index 0000000..acddfb7
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/cache_storage.html
@@ -0,0 +1,652 @@
+ nodriver.cdp.cache_storage - nodriver documentation
Source code for nodriver.cdp.cache_storage

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: CacheStorage (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import storage
+
+
+
+[docs] +class CacheId(str): + """ + Unique identifier of the Cache object. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> CacheId: + return cls(json) + + def __repr__(self): + return "CacheId({})".format(super().__repr__())
+ + + +
+[docs] +class CachedResponseType(enum.Enum): + """ + type of HTTP response cached + """ + + BASIC = "basic" + CORS = "cors" + DEFAULT = "default" + ERROR = "error" + OPAQUE_RESPONSE = "opaqueResponse" + OPAQUE_REDIRECT = "opaqueRedirect" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CachedResponseType: + return cls(json)
+ + + +
+[docs] +@dataclass +class DataEntry: + """ + Data entry. + """ + + #: Request URL. + request_url: str + + #: Request method. + request_method: str + + #: Request headers + request_headers: typing.List[Header] + + #: Number of seconds since epoch. + response_time: float + + #: HTTP response status code. + response_status: int + + #: HTTP response status text. + response_status_text: str + + #: HTTP response type + response_type: CachedResponseType + + #: Response headers + response_headers: typing.List[Header] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["requestURL"] = self.request_url + json["requestMethod"] = self.request_method + json["requestHeaders"] = [i.to_json() for i in self.request_headers] + json["responseTime"] = self.response_time + json["responseStatus"] = self.response_status + json["responseStatusText"] = self.response_status_text + json["responseType"] = self.response_type.to_json() + json["responseHeaders"] = [i.to_json() for i in self.response_headers] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DataEntry: + return cls( + request_url=str(json["requestURL"]), + request_method=str(json["requestMethod"]), + request_headers=[Header.from_json(i) for i in json["requestHeaders"]], + response_time=float(json["responseTime"]), + response_status=int(json["responseStatus"]), + response_status_text=str(json["responseStatusText"]), + response_type=CachedResponseType.from_json(json["responseType"]), + response_headers=[Header.from_json(i) for i in json["responseHeaders"]], + )
+ + + +
+[docs] +@dataclass +class Cache: + """ + Cache identifier. + """ + + #: An opaque unique id of the cache. + cache_id: CacheId + + #: Security origin of the cache. + security_origin: str + + #: Storage key of the cache. + storage_key: str + + #: The name of the cache. + cache_name: str + + #: Storage bucket of the cache. + storage_bucket: typing.Optional[storage.StorageBucket] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["cacheId"] = self.cache_id.to_json() + json["securityOrigin"] = self.security_origin + json["storageKey"] = self.storage_key + json["cacheName"] = self.cache_name + if self.storage_bucket is not None: + json["storageBucket"] = self.storage_bucket.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Cache: + return cls( + cache_id=CacheId.from_json(json["cacheId"]), + security_origin=str(json["securityOrigin"]), + storage_key=str(json["storageKey"]), + cache_name=str(json["cacheName"]), + storage_bucket=( + storage.StorageBucket.from_json(json["storageBucket"]) + if json.get("storageBucket", None) is not None + else None + ), + )
+ + + + + + + +
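These generated wrappers only translate between snake_case dataclasses and the camelCase JSON used on the wire, so a quick round trip through ``from_json`` / ``to_json`` shows the whole contract. A minimal sketch with a made-up payload (the ids and origin are illustrative, not real protocol output):

```python
from nodriver.cdp.cache_storage import Cache, CacheId

payload = {                                  # illustrative values, not real protocol output
    "cacheId": "example-cache-id",
    "securityOrigin": "https://example.com",
    "storageKey": "https://example.com/",
    "cacheName": "v1-assets",
}

cache = Cache.from_json(payload)
assert isinstance(cache.cache_id, CacheId)   # plain str on the wire, CacheId in Python
assert cache.storage_bucket is None          # optional field was absent from the payload
assert cache.to_json() == payload            # camelCase keys come back on serialisation
```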
+[docs] +@dataclass +class CachedResponse: + """ + Cached response + """ + + #: Entry content, base64-encoded. (Encoded as a base64 string when passed over JSON) + body: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["body"] = self.body + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CachedResponse: + return cls( + body=str(json["body"]), + )
+ + + +
+[docs] +def delete_cache(cache_id: CacheId) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Deletes a cache. + + :param cache_id: Id of cache for deletion. + """ + params: T_JSON_DICT = dict() + params["cacheId"] = cache_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CacheStorage.deleteCache", + "params": params, + } + json = yield cmd_dict
+ + + +
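Every command in these modules is a plain generator: it yields exactly one request dict (``method`` plus ``params``) and expects the raw CDP result to be sent back in. nodriver's connection drives this for you, but it can also be exercised by hand, which makes the shape of the protocol obvious. A small sketch with an illustrative cache id:

```python
from nodriver.cdp.cache_storage import CacheId, delete_cache

cmd = delete_cache(CacheId("example-cache-id"))   # illustrative id

request = next(cmd)                               # advance to the yield: the CDP request dict
assert request == {
    "method": "CacheStorage.deleteCache",
    "params": {"cacheId": "example-cache-id"},
}

try:
    cmd.send({})   # feed the (empty) CDP result back in; the generator then finishes
except StopIteration:
    pass           # deleteCache returns nothing; commands with results carry them in StopIteration.value
```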
+[docs] +def delete_entry( + cache_id: CacheId, request: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Deletes a cache entry. + + :param cache_id: Id of cache where the entry will be deleted. + :param request: URL spec of the request. + """ + params: T_JSON_DICT = dict() + params["cacheId"] = cache_id.to_json() + params["request"] = request + cmd_dict: T_JSON_DICT = { + "method": "CacheStorage.deleteEntry", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def request_cache_names( + security_origin: typing.Optional[str] = None, + storage_key: typing.Optional[str] = None, + storage_bucket: typing.Optional[storage.StorageBucket] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Cache]]: + """ + Requests cache names. + + :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, storageBucket must be specified. Security origin. + :param storage_key: *(Optional)* Storage key. + :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket. + :returns: Caches for the security origin. + """ + params: T_JSON_DICT = dict() + if security_origin is not None: + params["securityOrigin"] = security_origin + if storage_key is not None: + params["storageKey"] = storage_key + if storage_bucket is not None: + params["storageBucket"] = storage_bucket.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CacheStorage.requestCacheNames", + "params": params, + } + json = yield cmd_dict + return [Cache.from_json(i) for i in json["caches"]]
+ + + +
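In normal use these generators are not driven by hand; they are handed to a connection or tab, which sends the request and feeds the response back. A hedged sketch assuming nodriver's usual entry points (``uc.start``, ``browser.get``, ``tab.send``), with an illustrative target origin:

```python
import nodriver as uc
from nodriver import cdp

async def main():
    browser = await uc.start()
    tab = await browser.get("https://example.com")

    # exactly one of security_origin / storage_key / storage_bucket must be given
    caches = await tab.send(
        cdp.cache_storage.request_cache_names(security_origin="https://example.com")
    )
    for cache in caches:
        print(cache.cache_name, cache.cache_id)

if __name__ == "__main__":
    uc.loop().run_until_complete(main())   # event-loop helper used in the nodriver examples
```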
+[docs] +def request_cached_response( + cache_id: CacheId, request_url: str, request_headers: typing.List[Header] +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, CachedResponse]: + """ + Fetches cache entry. + + :param cache_id: Id of cache that contains the entry. + :param request_url: URL spec of the request. + :param request_headers: headers of the request. + :returns: Response read from the cache. + """ + params: T_JSON_DICT = dict() + params["cacheId"] = cache_id.to_json() + params["requestURL"] = request_url + params["requestHeaders"] = [i.to_json() for i in request_headers] + cmd_dict: T_JSON_DICT = { + "method": "CacheStorage.requestCachedResponse", + "params": params, + } + json = yield cmd_dict + return CachedResponse.from_json(json["response"])
+ + + +
+[docs] +def request_entries( + cache_id: CacheId, + skip_count: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + path_filter: typing.Optional[str] = None, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[typing.List[DataEntry], float] +]: + """ + Requests data from cache. + + :param cache_id: ID of cache to get entries from. + :param skip_count: *(Optional)* Number of records to skip. + :param page_size: *(Optional)* Number of records to fetch. + :param path_filter: *(Optional)* If present, only return the entries containing this substring in the path + :returns: A tuple with the following items: + + 0. **cacheDataEntries** - Array of object store data entries. + 1. **returnCount** - Count of returned entries from this storage. If pathFilter is empty, it is the count of all entries from this storage. + """ + params: T_JSON_DICT = dict() + params["cacheId"] = cache_id.to_json() + if skip_count is not None: + params["skipCount"] = skip_count + if page_size is not None: + params["pageSize"] = page_size + if path_filter is not None: + params["pathFilter"] = path_filter + cmd_dict: T_JSON_DICT = { + "method": "CacheStorage.requestEntries", + "params": params, + } + json = yield cmd_dict + return ( + [DataEntry.from_json(i) for i in json["cacheDataEntries"]], + float(json["returnCount"]), + )
+ +
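``request_entries`` is one of the commands that returns several values; the wrapper packs them into a tuple in the order documented above, so it unpacks naturally. Continuing the ``tab`` and cache list from the previous sketch (indented to sit inside ``main()``):

```python
    cache_id = caches[0].cache_id            # first cache from request_cache_names
    entries, total = await tab.send(
        cdp.cache_storage.request_entries(cache_id=cache_id, skip_count=0, page_size=50)
    )
    for entry in entries:
        print(entry.request_method, entry.request_url, "->", entry.response_status)
    print(f"{total:.0f} entries in this cache")
```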
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/cast.html b/docs/_build/html/_modules/nodriver/cdp/cast.html
new file mode 100644
index 0000000..5181bae
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/cast.html
@@ -0,0 +1,497 @@

Source code for nodriver.cdp.cast

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Cast (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +@dataclass +class Sink: + name: str + + id_: str + + #: Text describing the current session. Present only if there is an active + #: session on the sink. + session: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["id"] = self.id_ + if self.session is not None: + json["session"] = self.session + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Sink: + return cls( + name=str(json["name"]), + id_=str(json["id"]), + session=( + str(json["session"]) if json.get("session", None) is not None else None + ), + )
+ + + +
+[docs] +def enable( + presentation_url: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Starts observing for sinks that can be used for tab mirroring, and if set, + sinks compatible with ``presentationUrl`` as well. When sinks are found, a + ``sinksUpdated`` event is fired. + Also starts observing for issue messages. When an issue is added or removed, + an ``issueUpdated`` event is fired. + + :param presentation_url: *(Optional)* + """ + params: T_JSON_DICT = dict() + if presentation_url is not None: + params["presentationUrl"] = presentation_url + cmd_dict: T_JSON_DICT = { + "method": "Cast.enable", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Stops observing for sinks and issues. + """ + cmd_dict: T_JSON_DICT = { + "method": "Cast.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_sink_to_use(sink_name: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets a sink to be used when the web page requests the browser to choose a + sink via Presentation API, Remote Playback API, or Cast SDK. + + :param sink_name: + """ + params: T_JSON_DICT = dict() + params["sinkName"] = sink_name + cmd_dict: T_JSON_DICT = { + "method": "Cast.setSinkToUse", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def start_desktop_mirroring( + sink_name: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Starts mirroring the desktop to the sink. + + :param sink_name: + """ + params: T_JSON_DICT = dict() + params["sinkName"] = sink_name + cmd_dict: T_JSON_DICT = { + "method": "Cast.startDesktopMirroring", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def start_tab_mirroring( + sink_name: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Starts mirroring the tab to the sink. + + :param sink_name: + """ + params: T_JSON_DICT = dict() + params["sinkName"] = sink_name + cmd_dict: T_JSON_DICT = { + "method": "Cast.startTabMirroring", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_casting(sink_name: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Stops the active Cast session on the sink. + + :param sink_name: + """ + params: T_JSON_DICT = dict() + params["sinkName"] = sink_name + cmd_dict: T_JSON_DICT = { + "method": "Cast.stopCasting", + "params": params, + } + json = yield cmd_dict
+ + + +
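Put together, a typical Cast session is: enable the domain (optionally with a presentation URL), wait for a ``sinksUpdated`` event, pick a sink by name, start mirroring, and stop again when done. A hedged sketch reusing the nodriver ``tab`` assumption from the CacheStorage examples; the sink name and receiver URL are illustrative:

```python
from nodriver import cdp

async def mirror_tab(tab, sink_name: str = "Living Room TV"):   # illustrative sink name
    # start observing sinks and issues; sinksUpdated / issueUpdated events follow
    await tab.send(cdp.cast.enable(presentation_url="https://example.com/receiver"))

    # choose the sink the browser should use for Presentation API / Cast SDK requests
    await tab.send(cdp.cast.set_sink_to_use(sink_name=sink_name))

    # mirror this tab to the sink, then tear the session down again
    await tab.send(cdp.cast.start_tab_mirroring(sink_name=sink_name))
    ...
    await tab.send(cdp.cast.stop_casting(sink_name=sink_name))
```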
+[docs] +@event_class("Cast.sinksUpdated") +@dataclass +class SinksUpdated: + """ + This is fired whenever the list of available sinks changes. A sink is a + device or a software surface that you can cast to. + """ + + sinks: typing.List[Sink] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SinksUpdated: + return cls(sinks=[Sink.from_json(i) for i in json["sinks"]])
+ + + +
+[docs] +@event_class("Cast.issueUpdated") +@dataclass +class IssueUpdated: + """ + This is fired whenever the outstanding issue/error message changes. + ``issueMessage`` is empty if there is no issue. + """ + + issue_message: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> IssueUpdated: + return cls(issue_message=str(json["issueMessage"]))
+ +
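The two event classes above are what a client registers callbacks for. A sketch assuming nodriver's per-tab ``add_handler(event_class, callback)`` registration (an assumption about the nodriver API, not something defined in this module), with ``tab`` taken from the earlier sketches:

```python
from nodriver import cdp

def on_sinks(event: cdp.cast.SinksUpdated):
    for sink in event.sinks:
        print("sink:", sink.name, "-", sink.session or "no active session")

def on_issue(event: cdp.cast.IssueUpdated):
    if event.issue_message:
        print("cast issue:", event.issue_message)

# assumed nodriver API: register CDP event callbacks on the tab's connection
tab.add_handler(cdp.cast.SinksUpdated, on_sinks)
tab.add_handler(cdp.cast.IssueUpdated, on_issue)
```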
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/console.html b/docs/_build/html/_modules/nodriver/cdp/console.html
new file mode 100644
index 0000000..1835694
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/console.html
@@ -0,0 +1,426 @@

Source code for nodriver.cdp.console

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Console
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +@dataclass +class ConsoleMessage: + """ + Console message. + """ + + #: Message source. + source: str + + #: Message severity. + level: str + + #: Message text. + text: str + + #: URL of the message origin. + url: typing.Optional[str] = None + + #: Line number in the resource that generated this message (1-based). + line: typing.Optional[int] = None + + #: Column number in the resource that generated this message (1-based). + column: typing.Optional[int] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["source"] = self.source + json["level"] = self.level + json["text"] = self.text + if self.url is not None: + json["url"] = self.url + if self.line is not None: + json["line"] = self.line + if self.column is not None: + json["column"] = self.column + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ConsoleMessage: + return cls( + source=str(json["source"]), + level=str(json["level"]), + text=str(json["text"]), + url=str(json["url"]) if json.get("url", None) is not None else None, + line=int(json["line"]) if json.get("line", None) is not None else None, + column=( + int(json["column"]) if json.get("column", None) is not None else None + ), + )
+ + + +
+[docs] +def clear_messages() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Does nothing. + """ + cmd_dict: T_JSON_DICT = { + "method": "Console.clearMessages", + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables console domain, prevents further console messages from being reported to the client. + """ + cmd_dict: T_JSON_DICT = { + "method": "Console.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables console domain, sends the messages collected so far to the client by means of the + ``messageAdded`` notification. + """ + cmd_dict: T_JSON_DICT = { + "method": "Console.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Console.messageAdded") +@dataclass +class MessageAdded: + """ + Issued when new console message is added. + """ + + #: Console message that has been added. + message: ConsoleMessage + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> MessageAdded: + return cls(message=ConsoleMessage.from_json(json["message"]))
+ +
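The Console domain follows the same enable-then-listen pattern: ``enable()`` replays everything collected so far via ``messageAdded`` and then streams new messages. A sketch under the same nodriver ``tab`` / ``add_handler`` assumptions as the Cast example:

```python
from nodriver import cdp

def on_console_message(event: cdp.console.MessageAdded):
    msg = event.message
    where = f"{msg.url}:{msg.line}" if msg.url else "<unknown source>"
    print(f"[{msg.level}] {msg.source} {where}: {msg.text}")

async def watch_console(tab):
    tab.add_handler(cdp.console.MessageAdded, on_console_message)   # assumed nodriver API
    await tab.send(cdp.console.enable())   # replays collected messages, then streams new ones
```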
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/css.html b/docs/_build/html/_modules/nodriver/cdp/css.html
new file mode 100644
index 0000000..0b580a6
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/css.html
@@ -0,0 +1,3060 @@

Source code for nodriver.cdp.css

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: CSS (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import page
+
+
+
+[docs] +class StyleSheetId(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> StyleSheetId: + return cls(json) + + def __repr__(self): + return "StyleSheetId({})".format(super().__repr__())
+ + + +
+[docs] +class StyleSheetOrigin(enum.Enum): + """ + Stylesheet type: "injected" for stylesheets injected via extension, "user-agent" for user-agent + stylesheets, "inspector" for stylesheets created by the inspector (i.e. those holding the "via + inspector" rules), "regular" for regular stylesheets. + """ + + INJECTED = "injected" + USER_AGENT = "user-agent" + INSPECTOR = "inspector" + REGULAR = "regular" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> StyleSheetOrigin: + return cls(json)
+ + + +
+[docs] +@dataclass +class PseudoElementMatches: + """ + CSS rule collection for a single pseudo style. + """ + + #: Pseudo element type. + pseudo_type: dom.PseudoType + + #: Matches of CSS rules applicable to the pseudo style. + matches: typing.List[RuleMatch] + + #: Pseudo element custom ident. + pseudo_identifier: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["pseudoType"] = self.pseudo_type.to_json() + json["matches"] = [i.to_json() for i in self.matches] + if self.pseudo_identifier is not None: + json["pseudoIdentifier"] = self.pseudo_identifier + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PseudoElementMatches: + return cls( + pseudo_type=dom.PseudoType.from_json(json["pseudoType"]), + matches=[RuleMatch.from_json(i) for i in json["matches"]], + pseudo_identifier=( + str(json["pseudoIdentifier"]) + if json.get("pseudoIdentifier", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class InheritedStyleEntry: + """ + Inherited CSS rule collection from ancestor node. + """ + + #: Matches of CSS rules matching the ancestor node in the style inheritance chain. + matched_css_rules: typing.List[RuleMatch] + + #: The ancestor node's inline style, if any, in the style inheritance chain. + inline_style: typing.Optional[CSSStyle] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["matchedCSSRules"] = [i.to_json() for i in self.matched_css_rules] + if self.inline_style is not None: + json["inlineStyle"] = self.inline_style.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InheritedStyleEntry: + return cls( + matched_css_rules=[RuleMatch.from_json(i) for i in json["matchedCSSRules"]], + inline_style=( + CSSStyle.from_json(json["inlineStyle"]) + if json.get("inlineStyle", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class InheritedPseudoElementMatches: + """ + Inherited pseudo element matches from pseudos of an ancestor node. + """ + + #: Matches of pseudo styles from the pseudos of an ancestor node. + pseudo_elements: typing.List[PseudoElementMatches] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["pseudoElements"] = [i.to_json() for i in self.pseudo_elements] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InheritedPseudoElementMatches: + return cls( + pseudo_elements=[ + PseudoElementMatches.from_json(i) for i in json["pseudoElements"] + ], + )
+ + + +
+[docs] +@dataclass +class RuleMatch: + """ + Match data for a CSS rule. + """ + + #: CSS rule in the match. + rule: CSSRule + + #: Matching selector indices in the rule's selectorList selectors (0-based). + matching_selectors: typing.List[int] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["rule"] = self.rule.to_json() + json["matchingSelectors"] = [i for i in self.matching_selectors] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RuleMatch: + return cls( + rule=CSSRule.from_json(json["rule"]), + matching_selectors=[int(i) for i in json["matchingSelectors"]], + )
+ + + +
+[docs] +@dataclass +class Value: + """ + Data for a simple selector (these are delimited by commas in a selector list). + """ + + #: Value text. + text: str + + #: Value range in the underlying resource (if available). + range_: typing.Optional[SourceRange] = None + + #: Specificity of the selector. + specificity: typing.Optional[Specificity] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["text"] = self.text + if self.range_ is not None: + json["range"] = self.range_.to_json() + if self.specificity is not None: + json["specificity"] = self.specificity.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Value: + return cls( + text=str(json["text"]), + range_=( + SourceRange.from_json(json["range"]) + if json.get("range", None) is not None + else None + ), + specificity=( + Specificity.from_json(json["specificity"]) + if json.get("specificity", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class Specificity: + """ + Specificity: + https://drafts.csswg.org/selectors/#specificity-rules + """ + + #: The a component, which represents the number of ID selectors. + a: int + + #: The b component, which represents the number of class selectors, attributes selectors, and + #: pseudo-classes. + b: int + + #: The c component, which represents the number of type selectors and pseudo-elements. + c: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["a"] = self.a + json["b"] = self.b + json["c"] = self.c + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Specificity: + return cls( + a=int(json["a"]), + b=int(json["b"]), + c=int(json["c"]), + )
+ + + +
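Specificity compares lexicographically: any number of ID selectors (``a``) outranks any number of classes, attributes, and pseudo-classes (``b``), which in turn outrank type selectors and pseudo-elements (``c``). Turning the three components into a tuple makes that ordering explicit; a small illustrative helper:

```python
from nodriver.cdp.css import Specificity

def outranks(left: Specificity, right: Specificity) -> bool:
    """True if ``left`` wins over ``right`` on specificity alone."""
    return (left.a, left.b, left.c) > (right.a, right.b, right.c)

# "#nav .item" (one id, one class) beats ".item a" (one class, one type selector)
assert outranks(Specificity(a=1, b=1, c=0), Specificity(a=0, b=1, c=1))
```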
+[docs] +@dataclass +class SelectorList: + """ + Selector list data. + """ + + #: Selectors in the list. + selectors: typing.List[Value] + + #: Rule selector text. + text: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["selectors"] = [i.to_json() for i in self.selectors] + json["text"] = self.text + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SelectorList: + return cls( + selectors=[Value.from_json(i) for i in json["selectors"]], + text=str(json["text"]), + )
+ + + +
+[docs] +@dataclass +class CSSStyleSheetHeader: + """ + CSS stylesheet metainformation. + """ + + #: The stylesheet identifier. + style_sheet_id: StyleSheetId + + #: Owner frame identifier. + frame_id: page.FrameId + + #: Stylesheet resource URL. Empty if this is a constructed stylesheet created using + #: new CSSStyleSheet() (but non-empty if this is a constructed stylesheet imported + #: as a CSS module script). + source_url: str + + #: Stylesheet origin. + origin: StyleSheetOrigin + + #: Stylesheet title. + title: str + + #: Denotes whether the stylesheet is disabled. + disabled: bool + + #: Whether this stylesheet is created for STYLE tag by parser. This flag is not set for + #: document.written STYLE tags. + is_inline: bool + + #: Whether this stylesheet is mutable. Inline stylesheets become mutable + #: after they have been modified via CSSOM API. + #: ``<link>`` element's stylesheets become mutable only if DevTools modifies them. + #: Constructed stylesheets (new CSSStyleSheet()) are mutable immediately after creation. + is_mutable: bool + + #: True if this stylesheet is created through new CSSStyleSheet() or imported as a + #: CSS module script. + is_constructed: bool + + #: Line offset of the stylesheet within the resource (zero based). + start_line: float + + #: Column offset of the stylesheet within the resource (zero based). + start_column: float + + #: Size of the content (in characters). + length: float + + #: Line offset of the end of the stylesheet within the resource (zero based). + end_line: float + + #: Column offset of the end of the stylesheet within the resource (zero based). + end_column: float + + #: URL of source map associated with the stylesheet (if any). + source_map_url: typing.Optional[str] = None + + #: The backend id for the owner node of the stylesheet. + owner_node: typing.Optional[dom.BackendNodeId] = None + + #: Whether the sourceURL field value comes from the sourceURL comment. 
+ has_source_url: typing.Optional[bool] = None + + #: If the style sheet was loaded from a network resource, this indicates when the resource failed to load + loading_failed: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["styleSheetId"] = self.style_sheet_id.to_json() + json["frameId"] = self.frame_id.to_json() + json["sourceURL"] = self.source_url + json["origin"] = self.origin.to_json() + json["title"] = self.title + json["disabled"] = self.disabled + json["isInline"] = self.is_inline + json["isMutable"] = self.is_mutable + json["isConstructed"] = self.is_constructed + json["startLine"] = self.start_line + json["startColumn"] = self.start_column + json["length"] = self.length + json["endLine"] = self.end_line + json["endColumn"] = self.end_column + if self.source_map_url is not None: + json["sourceMapURL"] = self.source_map_url + if self.owner_node is not None: + json["ownerNode"] = self.owner_node.to_json() + if self.has_source_url is not None: + json["hasSourceURL"] = self.has_source_url + if self.loading_failed is not None: + json["loadingFailed"] = self.loading_failed + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSStyleSheetHeader: + return cls( + style_sheet_id=StyleSheetId.from_json(json["styleSheetId"]), + frame_id=page.FrameId.from_json(json["frameId"]), + source_url=str(json["sourceURL"]), + origin=StyleSheetOrigin.from_json(json["origin"]), + title=str(json["title"]), + disabled=bool(json["disabled"]), + is_inline=bool(json["isInline"]), + is_mutable=bool(json["isMutable"]), + is_constructed=bool(json["isConstructed"]), + start_line=float(json["startLine"]), + start_column=float(json["startColumn"]), + length=float(json["length"]), + end_line=float(json["endLine"]), + end_column=float(json["endColumn"]), + source_map_url=( + str(json["sourceMapURL"]) + if json.get("sourceMapURL", None) is not None + else None + ), + owner_node=( + dom.BackendNodeId.from_json(json["ownerNode"]) + if json.get("ownerNode", None) is not None + else None + ), + has_source_url=( + bool(json["hasSourceURL"]) + if json.get("hasSourceURL", None) is not None + else None + ), + loading_failed=( + bool(json["loadingFailed"]) + if json.get("loadingFailed", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSRule: + """ + CSS rule representation. + """ + + #: Rule selector data. + selector_list: SelectorList + + #: Parent stylesheet's origin. + origin: StyleSheetOrigin + + #: Associated style declaration. + style: CSSStyle + + #: The css style sheet identifier (absent for user agent stylesheet and user-specified + #: stylesheet rules) this rule came from. + style_sheet_id: typing.Optional[StyleSheetId] = None + + #: Array of selectors from ancestor style rules, sorted by distance from the current rule. + nesting_selectors: typing.Optional[typing.List[str]] = None + + #: Media list array (for rules involving media queries). The array enumerates media queries + #: starting with the innermost one, going outwards. + media: typing.Optional[typing.List[CSSMedia]] = None + + #: Container query list array (for rules involving container queries). + #: The array enumerates container queries starting with the innermost one, going outwards. + container_queries: typing.Optional[typing.List[CSSContainerQuery]] = None + + #: @supports CSS at-rule array. + #: The array enumerates @supports at-rules starting with the innermost one, going outwards. + supports: typing.Optional[typing.List[CSSSupports]] = None + + #: Cascade layer array. Contains the layer hierarchy that this rule belongs to starting + #: with the innermost layer and going outwards. + layers: typing.Optional[typing.List[CSSLayer]] = None + + #: @scope CSS at-rule array. + #: The array enumerates @scope at-rules starting with the innermost one, going outwards. + scopes: typing.Optional[typing.List[CSSScope]] = None + + #: The array keeps the types of ancestor CSSRules from the innermost going outwards. + rule_types: typing.Optional[typing.List[CSSRuleType]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["selectorList"] = self.selector_list.to_json() + json["origin"] = self.origin.to_json() + json["style"] = self.style.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + if self.nesting_selectors is not None: + json["nestingSelectors"] = [i for i in self.nesting_selectors] + if self.media is not None: + json["media"] = [i.to_json() for i in self.media] + if self.container_queries is not None: + json["containerQueries"] = [i.to_json() for i in self.container_queries] + if self.supports is not None: + json["supports"] = [i.to_json() for i in self.supports] + if self.layers is not None: + json["layers"] = [i.to_json() for i in self.layers] + if self.scopes is not None: + json["scopes"] = [i.to_json() for i in self.scopes] + if self.rule_types is not None: + json["ruleTypes"] = [i.to_json() for i in self.rule_types] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSRule: + return cls( + selector_list=SelectorList.from_json(json["selectorList"]), + origin=StyleSheetOrigin.from_json(json["origin"]), + style=CSSStyle.from_json(json["style"]), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + nesting_selectors=( + [str(i) for i in json["nestingSelectors"]] + if json.get("nestingSelectors", None) is not None + else None + ), + media=( + [CSSMedia.from_json(i) for i in json["media"]] + if json.get("media", None) is not None + else None + ), + container_queries=( + [CSSContainerQuery.from_json(i) for i in json["containerQueries"]] + if json.get("containerQueries", None) is not None + else None + ), + supports=( + 
[CSSSupports.from_json(i) for i in json["supports"]] + if json.get("supports", None) is not None + else None + ), + layers=( + [CSSLayer.from_json(i) for i in json["layers"]] + if json.get("layers", None) is not None + else None + ), + scopes=( + [CSSScope.from_json(i) for i in json["scopes"]] + if json.get("scopes", None) is not None + else None + ), + rule_types=( + [CSSRuleType.from_json(i) for i in json["ruleTypes"]] + if json.get("ruleTypes", None) is not None + else None + ), + )
+ + + +
+[docs] +class CSSRuleType(enum.Enum): + """ + Enum indicating the type of a CSS rule, used to represent the order of a style rule's ancestors. + This list only contains rule types that are collected during the ancestor rule collection. + """ + + MEDIA_RULE = "MediaRule" + SUPPORTS_RULE = "SupportsRule" + CONTAINER_RULE = "ContainerRule" + LAYER_RULE = "LayerRule" + SCOPE_RULE = "ScopeRule" + STYLE_RULE = "StyleRule" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CSSRuleType: + return cls(json)
+ + + +
+[docs] +@dataclass +class RuleUsage: + """ + CSS coverage information. + """ + + #: The css style sheet identifier (absent for user agent stylesheet and user-specified + #: stylesheet rules) this rule came from. + style_sheet_id: StyleSheetId + + #: Offset of the start of the rule (including selector) from the beginning of the stylesheet. + start_offset: float + + #: Offset of the end of the rule body from the beginning of the stylesheet. + end_offset: float + + #: Indicates whether the rule was actually used by some element in the page. + used: bool + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["styleSheetId"] = self.style_sheet_id.to_json() + json["startOffset"] = self.start_offset + json["endOffset"] = self.end_offset + json["used"] = self.used + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RuleUsage: + return cls( + style_sheet_id=StyleSheetId.from_json(json["styleSheetId"]), + start_offset=float(json["startOffset"]), + end_offset=float(json["endOffset"]), + used=bool(json["used"]), + )
+ + + +
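``RuleUsage`` entries are per-rule character ranges within a stylesheet plus a ``used`` flag (this is what the CSS coverage commands report). Summing the used ranges per stylesheet gives a rough "how much of this sheet is exercised" figure; an illustrative helper (overlapping ranges are not merged here):

```python
from collections import defaultdict
from nodriver.cdp.css import RuleUsage

def used_chars_per_sheet(usage: list[RuleUsage]) -> dict[str, float]:
    """Characters covered by rules that were actually used, keyed by stylesheet id."""
    totals: dict[str, float] = defaultdict(float)
    for entry in usage:
        if entry.used:
            totals[str(entry.style_sheet_id)] += entry.end_offset - entry.start_offset
    return dict(totals)
```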
+[docs] +@dataclass +class SourceRange: + """ + Text range within a resource. All numbers are zero-based. + """ + + #: Start line of range. + start_line: int + + #: Start column of range (inclusive). + start_column: int + + #: End line of range + end_line: int + + #: End column of range (exclusive). + end_column: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["startLine"] = self.start_line + json["startColumn"] = self.start_column + json["endLine"] = self.end_line + json["endColumn"] = self.end_column + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SourceRange: + return cls( + start_line=int(json["startLine"]), + start_column=int(json["startColumn"]), + end_line=int(json["endLine"]), + end_column=int(json["endColumn"]), + )
+ + + +
+[docs] +@dataclass +class ShorthandEntry: + #: Shorthand name. + name: str + + #: Shorthand value. + value: str + + #: Whether the property has "!important" annotation (implies ``false`` if absent). + important: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + if self.important is not None: + json["important"] = self.important + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ShorthandEntry: + return cls( + name=str(json["name"]), + value=str(json["value"]), + important=( + bool(json["important"]) + if json.get("important", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSComputedStyleProperty: + #: Computed style property name. + name: str + + #: Computed style property value. + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSComputedStyleProperty: + return cls( + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@dataclass +class CSSStyle: + """ + CSS style representation. + """ + + #: CSS properties in the style. + css_properties: typing.List[CSSProperty] + + #: Computed values for all shorthands found in the style. + shorthand_entries: typing.List[ShorthandEntry] + + #: The css style sheet identifier (absent for user agent stylesheet and user-specified + #: stylesheet rules) this rule came from. + style_sheet_id: typing.Optional[StyleSheetId] = None + + #: Style declaration text (if available). + css_text: typing.Optional[str] = None + + #: Style declaration range in the enclosing stylesheet (if available). + range_: typing.Optional[SourceRange] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["cssProperties"] = [i.to_json() for i in self.css_properties] + json["shorthandEntries"] = [i.to_json() for i in self.shorthand_entries] + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + if self.css_text is not None: + json["cssText"] = self.css_text + if self.range_ is not None: + json["range"] = self.range_.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSStyle: + return cls( + css_properties=[CSSProperty.from_json(i) for i in json["cssProperties"]], + shorthand_entries=[ + ShorthandEntry.from_json(i) for i in json["shorthandEntries"] + ], + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + css_text=( + str(json["cssText"]) if json.get("cssText", None) is not None else None + ), + range_=( + SourceRange.from_json(json["range"]) + if json.get("range", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSProperty: + """ + CSS property declaration data. + """ + + #: The property name. + name: str + + #: The property value. + value: str + + #: Whether the property has "!important" annotation (implies ``false`` if absent). + important: typing.Optional[bool] = None + + #: Whether the property is implicit (implies ``false`` if absent). + implicit: typing.Optional[bool] = None + + #: The full property text as specified in the style. + text: typing.Optional[str] = None + + #: Whether the property is understood by the browser (implies ``true`` if absent). + parsed_ok: typing.Optional[bool] = None + + #: Whether the property is disabled by the user (present for source-based properties only). + disabled: typing.Optional[bool] = None + + #: The entire property range in the enclosing style declaration (if available). + range_: typing.Optional[SourceRange] = None + + #: Parsed longhand components of this property if it is a shorthand. + #: This field will be empty if the given property is not a shorthand. + longhand_properties: typing.Optional[typing.List[CSSProperty]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + if self.important is not None: + json["important"] = self.important + if self.implicit is not None: + json["implicit"] = self.implicit + if self.text is not None: + json["text"] = self.text + if self.parsed_ok is not None: + json["parsedOk"] = self.parsed_ok + if self.disabled is not None: + json["disabled"] = self.disabled + if self.range_ is not None: + json["range"] = self.range_.to_json() + if self.longhand_properties is not None: + json["longhandProperties"] = [i.to_json() for i in self.longhand_properties] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSProperty: + return cls( + name=str(json["name"]), + value=str(json["value"]), + important=( + bool(json["important"]) + if json.get("important", None) is not None + else None + ), + implicit=( + bool(json["implicit"]) + if json.get("implicit", None) is not None + else None + ), + text=str(json["text"]) if json.get("text", None) is not None else None, + parsed_ok=( + bool(json["parsedOk"]) + if json.get("parsedOk", None) is not None + else None + ), + disabled=( + bool(json["disabled"]) + if json.get("disabled", None) is not None + else None + ), + range_=( + SourceRange.from_json(json["range"]) + if json.get("range", None) is not None + else None + ), + longhand_properties=( + [CSSProperty.from_json(i) for i in json["longhandProperties"]] + if json.get("longhandProperties", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSMedia: + """ + CSS media rule descriptor. + """ + + #: Media query text. + text: str + + #: Source of the media query: "mediaRule" if specified by a @media rule, "importRule" if + #: specified by an @import rule, "linkedSheet" if specified by a "media" attribute in a linked + #: stylesheet's LINK tag, "inlineSheet" if specified by a "media" attribute in an inline + #: stylesheet's STYLE tag. + source: str + + #: URL of the document containing the media query description. + source_url: typing.Optional[str] = None + + #: The associated rule (@media or @import) header range in the enclosing stylesheet (if + #: available). + range_: typing.Optional[SourceRange] = None + + #: Identifier of the stylesheet containing this object (if exists). + style_sheet_id: typing.Optional[StyleSheetId] = None + + #: Array of media queries. + media_list: typing.Optional[typing.List[MediaQuery]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["text"] = self.text + json["source"] = self.source + if self.source_url is not None: + json["sourceURL"] = self.source_url + if self.range_ is not None: + json["range"] = self.range_.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + if self.media_list is not None: + json["mediaList"] = [i.to_json() for i in self.media_list] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSMedia: + return cls( + text=str(json["text"]), + source=str(json["source"]), + source_url=( + str(json["sourceURL"]) + if json.get("sourceURL", None) is not None + else None + ), + range_=( + SourceRange.from_json(json["range"]) + if json.get("range", None) is not None + else None + ), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + media_list=( + [MediaQuery.from_json(i) for i in json["mediaList"]] + if json.get("mediaList", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class MediaQuery: + """ + Media query descriptor. + """ + + #: Array of media query expressions. + expressions: typing.List[MediaQueryExpression] + + #: Whether the media query condition is satisfied. + active: bool + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["expressions"] = [i.to_json() for i in self.expressions] + json["active"] = self.active + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> MediaQuery: + return cls( + expressions=[ + MediaQueryExpression.from_json(i) for i in json["expressions"] + ], + active=bool(json["active"]), + )
+ + + +
+[docs] +@dataclass +class MediaQueryExpression: + """ + Media query expression descriptor. + """ + + #: Media query expression value. + value: float + + #: Media query expression units. + unit: str + + #: Media query expression feature. + feature: str + + #: The associated range of the value text in the enclosing stylesheet (if available). + value_range: typing.Optional[SourceRange] = None + + #: Computed length of media query expression (if applicable). + computed_length: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["value"] = self.value + json["unit"] = self.unit + json["feature"] = self.feature + if self.value_range is not None: + json["valueRange"] = self.value_range.to_json() + if self.computed_length is not None: + json["computedLength"] = self.computed_length + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> MediaQueryExpression: + return cls( + value=float(json["value"]), + unit=str(json["unit"]), + feature=str(json["feature"]), + value_range=( + SourceRange.from_json(json["valueRange"]) + if json.get("valueRange", None) is not None + else None + ), + computed_length=( + float(json["computedLength"]) + if json.get("computedLength", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSContainerQuery: + """ + CSS container query rule descriptor. + """ + + #: Container query text. + text: str + + #: The associated rule header range in the enclosing stylesheet (if + #: available). + range_: typing.Optional[SourceRange] = None + + #: Identifier of the stylesheet containing this object (if exists). + style_sheet_id: typing.Optional[StyleSheetId] = None + + #: Optional name for the container. + name: typing.Optional[str] = None + + #: Optional physical axes queried for the container. + physical_axes: typing.Optional[dom.PhysicalAxes] = None + + #: Optional logical axes queried for the container. + logical_axes: typing.Optional[dom.LogicalAxes] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["text"] = self.text + if self.range_ is not None: + json["range"] = self.range_.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + if self.name is not None: + json["name"] = self.name + if self.physical_axes is not None: + json["physicalAxes"] = self.physical_axes.to_json() + if self.logical_axes is not None: + json["logicalAxes"] = self.logical_axes.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSContainerQuery: + return cls( + text=str(json["text"]), + range_=( + SourceRange.from_json(json["range"]) + if json.get("range", None) is not None + else None + ), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + name=str(json["name"]) if json.get("name", None) is not None else None, + physical_axes=( + dom.PhysicalAxes.from_json(json["physicalAxes"]) + if json.get("physicalAxes", None) is not None + else None + ), + logical_axes=( + dom.LogicalAxes.from_json(json["logicalAxes"]) + if json.get("logicalAxes", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSSupports: + """ + CSS Supports at-rule descriptor. + """ + + #: Supports rule text. + text: str + + #: Whether the supports condition is satisfied. + active: bool + + #: The associated rule header range in the enclosing stylesheet (if + #: available). + range_: typing.Optional[SourceRange] = None + + #: Identifier of the stylesheet containing this object (if exists). + style_sheet_id: typing.Optional[StyleSheetId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["text"] = self.text + json["active"] = self.active + if self.range_ is not None: + json["range"] = self.range_.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSSupports: + return cls( + text=str(json["text"]), + active=bool(json["active"]), + range_=( + SourceRange.from_json(json["range"]) + if json.get("range", None) is not None + else None + ), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSScope: + """ + CSS Scope at-rule descriptor. + """ + + #: Scope rule text. + text: str + + #: The associated rule header range in the enclosing stylesheet (if + #: available). + range_: typing.Optional[SourceRange] = None + + #: Identifier of the stylesheet containing this object (if exists). + style_sheet_id: typing.Optional[StyleSheetId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["text"] = self.text + if self.range_ is not None: + json["range"] = self.range_.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSScope: + return cls( + text=str(json["text"]), + range_=( + SourceRange.from_json(json["range"]) + if json.get("range", None) is not None + else None + ), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSLayer: + """ + CSS Layer at-rule descriptor. + """ + + #: Layer name. + text: str + + #: The associated rule header range in the enclosing stylesheet (if + #: available). + range_: typing.Optional[SourceRange] = None + + #: Identifier of the stylesheet containing this object (if exists). + style_sheet_id: typing.Optional[StyleSheetId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["text"] = self.text + if self.range_ is not None: + json["range"] = self.range_.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSLayer: + return cls( + text=str(json["text"]), + range_=( + SourceRange.from_json(json["range"]) + if json.get("range", None) is not None + else None + ), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSLayerData: + """ + CSS Layer data. + """ + + #: Layer name. + name: str + + #: Layer order. The order determines the order of the layer in the cascade order. + #: A higher number has higher priority in the cascade order. + order: float + + #: Direct sub-layers + sub_layers: typing.Optional[typing.List[CSSLayerData]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["order"] = self.order + if self.sub_layers is not None: + json["subLayers"] = [i.to_json() for i in self.sub_layers] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSLayerData: + return cls( + name=str(json["name"]), + order=float(json["order"]), + sub_layers=( + [CSSLayerData.from_json(i) for i in json["subLayers"]] + if json.get("subLayers", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class PlatformFontUsage: + """ + Information about amount of glyphs that were rendered with given font. + """ + + #: Font's family name reported by platform. + family_name: str + + #: Font's PostScript name reported by platform. + post_script_name: str + + #: Indicates if the font was downloaded or resolved locally. + is_custom_font: bool + + #: Amount of glyphs that were rendered with this font. + glyph_count: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["familyName"] = self.family_name + json["postScriptName"] = self.post_script_name + json["isCustomFont"] = self.is_custom_font + json["glyphCount"] = self.glyph_count + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlatformFontUsage: + return cls( + family_name=str(json["familyName"]), + post_script_name=str(json["postScriptName"]), + is_custom_font=bool(json["isCustomFont"]), + glyph_count=float(json["glyphCount"]), + )
+ + + +
+[docs] +@dataclass +class FontVariationAxis: + """ + Information about font variation axes for variable fonts + """ + + #: The font-variation-setting tag (a.k.a. "axis tag"). + tag: str + + #: Human-readable variation name in the default language (normally, "en"). + name: str + + #: The minimum value (inclusive) the font supports for this tag. + min_value: float + + #: The maximum value (inclusive) the font supports for this tag. + max_value: float + + #: The default value. + default_value: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["tag"] = self.tag + json["name"] = self.name + json["minValue"] = self.min_value + json["maxValue"] = self.max_value + json["defaultValue"] = self.default_value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FontVariationAxis: + return cls( + tag=str(json["tag"]), + name=str(json["name"]), + min_value=float(json["minValue"]), + max_value=float(json["maxValue"]), + default_value=float(json["defaultValue"]), + )
+ + + +
+[docs] +@dataclass +class FontFace: + """ + Properties of a web font: https://www.w3.org/TR/2008/REC-CSS2-20080411/fonts.html#font-descriptions + and additional information such as platformFontFamily and fontVariationAxes. + """ + + #: The font-family. + font_family: str + + #: The font-style. + font_style: str + + #: The font-variant. + font_variant: str + + #: The font-weight. + font_weight: str + + #: The font-stretch. + font_stretch: str + + #: The font-display. + font_display: str + + #: The unicode-range. + unicode_range: str + + #: The src. + src: str + + #: The resolved platform font family + platform_font_family: str + + #: Available variation settings (a.k.a. "axes"). + font_variation_axes: typing.Optional[typing.List[FontVariationAxis]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["fontFamily"] = self.font_family + json["fontStyle"] = self.font_style + json["fontVariant"] = self.font_variant + json["fontWeight"] = self.font_weight + json["fontStretch"] = self.font_stretch + json["fontDisplay"] = self.font_display + json["unicodeRange"] = self.unicode_range + json["src"] = self.src + json["platformFontFamily"] = self.platform_font_family + if self.font_variation_axes is not None: + json["fontVariationAxes"] = [i.to_json() for i in self.font_variation_axes] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FontFace: + return cls( + font_family=str(json["fontFamily"]), + font_style=str(json["fontStyle"]), + font_variant=str(json["fontVariant"]), + font_weight=str(json["fontWeight"]), + font_stretch=str(json["fontStretch"]), + font_display=str(json["fontDisplay"]), + unicode_range=str(json["unicodeRange"]), + src=str(json["src"]), + platform_font_family=str(json["platformFontFamily"]), + font_variation_axes=( + [FontVariationAxis.from_json(i) for i in json["fontVariationAxes"]] + if json.get("fontVariationAxes", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSTryRule: + """ + CSS try rule representation. + """ + + #: Parent stylesheet's origin. + origin: StyleSheetOrigin + + #: Associated style declaration. + style: CSSStyle + + #: The css style sheet identifier (absent for user agent stylesheet and user-specified + #: stylesheet rules) this rule came from. + style_sheet_id: typing.Optional[StyleSheetId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["origin"] = self.origin.to_json() + json["style"] = self.style.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSTryRule: + return cls( + origin=StyleSheetOrigin.from_json(json["origin"]), + style=CSSStyle.from_json(json["style"]), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSPositionFallbackRule: + """ + CSS position-fallback rule representation. + """ + + name: Value + + #: List of keyframes. + try_rules: typing.List[CSSTryRule] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name.to_json() + json["tryRules"] = [i.to_json() for i in self.try_rules] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSPositionFallbackRule: + return cls( + name=Value.from_json(json["name"]), + try_rules=[CSSTryRule.from_json(i) for i in json["tryRules"]], + )
+ + + +
+[docs] +@dataclass +class CSSPositionTryRule: + """ + CSS @position-try rule representation. + """ + + #: The prelude dashed-ident name + name: Value + + #: Parent stylesheet's origin. + origin: StyleSheetOrigin + + #: Associated style declaration. + style: CSSStyle + + #: The css style sheet identifier (absent for user agent stylesheet and user-specified + #: stylesheet rules) this rule came from. + style_sheet_id: typing.Optional[StyleSheetId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name.to_json() + json["origin"] = self.origin.to_json() + json["style"] = self.style.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSPositionTryRule: + return cls( + name=Value.from_json(json["name"]), + origin=StyleSheetOrigin.from_json(json["origin"]), + style=CSSStyle.from_json(json["style"]), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSKeyframesRule: + """ + CSS keyframes rule representation. + """ + + #: Animation name. + animation_name: Value + + #: List of keyframes. + keyframes: typing.List[CSSKeyframeRule] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["animationName"] = self.animation_name.to_json() + json["keyframes"] = [i.to_json() for i in self.keyframes] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSKeyframesRule: + return cls( + animation_name=Value.from_json(json["animationName"]), + keyframes=[CSSKeyframeRule.from_json(i) for i in json["keyframes"]], + )
+ + + +
+[docs] +@dataclass +class CSSPropertyRegistration: + """ + Representation of a custom property registration through CSS.registerProperty + """ + + property_name: str + + inherits: bool + + syntax: str + + initial_value: typing.Optional[Value] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["propertyName"] = self.property_name + json["inherits"] = self.inherits + json["syntax"] = self.syntax + if self.initial_value is not None: + json["initialValue"] = self.initial_value.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSPropertyRegistration: + return cls( + property_name=str(json["propertyName"]), + inherits=bool(json["inherits"]), + syntax=str(json["syntax"]), + initial_value=( + Value.from_json(json["initialValue"]) + if json.get("initialValue", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSFontPaletteValuesRule: + """ + CSS font-palette-values rule representation. + """ + + #: Parent stylesheet's origin. + origin: StyleSheetOrigin + + #: Associated font palette name. + font_palette_name: Value + + #: Associated style declaration. + style: CSSStyle + + #: The css style sheet identifier (absent for user agent stylesheet and user-specified + #: stylesheet rules) this rule came from. + style_sheet_id: typing.Optional[StyleSheetId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["origin"] = self.origin.to_json() + json["fontPaletteName"] = self.font_palette_name.to_json() + json["style"] = self.style.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSFontPaletteValuesRule: + return cls( + origin=StyleSheetOrigin.from_json(json["origin"]), + font_palette_name=Value.from_json(json["fontPaletteName"]), + style=CSSStyle.from_json(json["style"]), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSPropertyRule: + """ + CSS property at-rule representation. + """ + + #: Parent stylesheet's origin. + origin: StyleSheetOrigin + + #: Associated property name. + property_name: Value + + #: Associated style declaration. + style: CSSStyle + + #: The css style sheet identifier (absent for user agent stylesheet and user-specified + #: stylesheet rules) this rule came from. + style_sheet_id: typing.Optional[StyleSheetId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["origin"] = self.origin.to_json() + json["propertyName"] = self.property_name.to_json() + json["style"] = self.style.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSPropertyRule: + return cls( + origin=StyleSheetOrigin.from_json(json["origin"]), + property_name=Value.from_json(json["propertyName"]), + style=CSSStyle.from_json(json["style"]), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CSSKeyframeRule: + """ + CSS keyframe rule representation. + """ + + #: Parent stylesheet's origin. + origin: StyleSheetOrigin + + #: Associated key text. + key_text: Value + + #: Associated style declaration. + style: CSSStyle + + #: The css style sheet identifier (absent for user agent stylesheet and user-specified + #: stylesheet rules) this rule came from. + style_sheet_id: typing.Optional[StyleSheetId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["origin"] = self.origin.to_json() + json["keyText"] = self.key_text.to_json() + json["style"] = self.style.to_json() + if self.style_sheet_id is not None: + json["styleSheetId"] = self.style_sheet_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSKeyframeRule: + return cls( + origin=StyleSheetOrigin.from_json(json["origin"]), + key_text=Value.from_json(json["keyText"]), + style=CSSStyle.from_json(json["style"]), + style_sheet_id=( + StyleSheetId.from_json(json["styleSheetId"]) + if json.get("styleSheetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class StyleDeclarationEdit: + """ + A descriptor of operation to mutate style declaration text. + """ + + #: The css style sheet identifier. + style_sheet_id: StyleSheetId + + #: The range of the style text in the enclosing stylesheet. + range_: SourceRange + + #: New style text. + text: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["styleSheetId"] = self.style_sheet_id.to_json() + json["range"] = self.range_.to_json() + json["text"] = self.text + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StyleDeclarationEdit: + return cls( + style_sheet_id=StyleSheetId.from_json(json["styleSheetId"]), + range_=SourceRange.from_json(json["range"]), + text=str(json["text"]), + )
+ + + +
+[docs] +def add_rule( + style_sheet_id: StyleSheetId, + rule_text: str, + location: SourceRange, + node_for_property_syntax_validation: typing.Optional[dom.NodeId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, CSSRule]: + """ + Inserts a new rule with the given ``ruleText`` in a stylesheet with given ``styleSheetId``, at the + position specified by ``location``. + + :param style_sheet_id: The css style sheet identifier where a new rule should be inserted. + :param rule_text: The text of a new rule. + :param location: Text position of a new rule in the target style sheet. + :param node_for_property_syntax_validation: **(EXPERIMENTAL)** *(Optional)* NodeId for the DOM node in whose context custom property declarations for registered properties should be validated. If omitted, declarations in the new rule text can only be validated statically, which may produce incorrect results if the declaration contains a var() for example. + :returns: The newly created rule. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["ruleText"] = rule_text + params["location"] = location.to_json() + if node_for_property_syntax_validation is not None: + params["nodeForPropertySyntaxValidation"] = ( + node_for_property_syntax_validation.to_json() + ) + cmd_dict: T_JSON_DICT = { + "method": "CSS.addRule", + "params": params, + } + json = yield cmd_dict + return CSSRule.from_json(json["rule"])
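A note on usage: every command wrapper in this module, ``add_rule`` included, is a plain generator that first yields the raw CDP request dict and then expects the raw response dict to be sent back in. The sketch below drives that handshake by hand; ``transport_call`` is a hypothetical coroutine standing in for whatever actually performs the WebSocket round trip (nodriver's own connection classes do this for you).

import typing

async def drive(cmd: typing.Generator, transport_call) -> typing.Any:
    # pull the request dict the wrapper built, e.g. {"method": "CSS.addRule", "params": {...}}
    request = next(cmd)
    raw = await transport_call(request["method"], request.get("params", {}))
    try:
        cmd.send(raw)              # feed the raw result back into the generator
    except StopIteration as exc:
        return exc.value           # the parsed, typed return value (for add_rule, a CSSRule)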
+ + + +
+[docs] +def collect_class_names( + style_sheet_id: StyleSheetId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]]: + """ + Returns all class names from specified stylesheet. + + :param style_sheet_id: + :returns: Class name list. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.collectClassNames", + "params": params, + } + json = yield cmd_dict + return [str(i) for i in json["classNames"]]
+ + + +
+[docs] +def create_style_sheet( + frame_id: page.FrameId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, StyleSheetId]: + """ + Creates a new special "via-inspector" stylesheet in the frame with given ``frameId``. + + :param frame_id: Identifier of the frame where "via-inspector" stylesheet should be created. + :returns: Identifier of the created "via-inspector" stylesheet. + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.createStyleSheet", + "params": params, + } + json = yield cmd_dict + return StyleSheetId.from_json(json["styleSheetId"])
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables the CSS agent for the given page. + """ + cmd_dict: T_JSON_DICT = { + "method": "CSS.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables the CSS agent for the given page. Clients should not assume that the CSS agent has been + enabled until the result of this command is received. + """ + cmd_dict: T_JSON_DICT = { + "method": "CSS.enable", + } + json = yield cmd_dict
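A rough end-to-end sketch tying ``enable``, ``create_style_sheet`` and ``add_rule`` together. It assumes ``tab`` is an already connected nodriver tab whose ``send()`` coroutine drives these generator-based commands and that ``frame_id`` is the ``page.FrameId`` of the target frame; neither is defined in this module, and the import assumes the nodriver package layout.

from nodriver import cdp

async def inject_css(tab, frame_id):
    # the CSS domain is typically used together with the DOM domain, so enable both
    await tab.send(cdp.dom.enable())
    await tab.send(cdp.css.enable())
    # create a "via-inspector" stylesheet, then insert a rule at its very start
    sheet_id = await tab.send(cdp.css.create_style_sheet(frame_id))
    start = cdp.css.SourceRange(start_line=0, start_column=0, end_line=0, end_column=0)
    rule = await tab.send(
        cdp.css.add_rule(sheet_id, "body { outline: 2px solid red; }", start)
    )
    return rule  # a CSSRule describing the inserted rule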
+ + + +
+[docs] +def force_pseudo_state( + node_id: dom.NodeId, forced_pseudo_classes: typing.List[str] +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Ensures that the given node will have specified pseudo-classes whenever its style is computed by + the browser. + + :param node_id: The element id for which to force the pseudo state. + :param forced_pseudo_classes: Element pseudo classes to force when computing the element's style. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["forcedPseudoClasses"] = [i for i in forced_pseudo_classes] + cmd_dict: T_JSON_DICT = { + "method": "CSS.forcePseudoState", + "params": params, + } + json = yield cmd_dict
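A small sketch of forcing and clearing pseudo state, useful when inspecting hover-only styles. As before, ``tab.send()`` and the ``node_id`` (a ``dom.NodeId`` obtained from DOM queries) are assumptions, not part of this module; the pseudo-class names are passed without the leading colon, as the DevTools frontend does.

from nodriver import cdp

async def preview_hover_styles(tab, node_id):
    # pretend the pointer is over the element so hover/focus-only rules apply
    await tab.send(cdp.css.force_pseudo_state(node_id, ["hover", "focus"]))
    # ... inspect computed or matched styles here ...
    # pass an empty list to clear the forced state again
    await tab.send(cdp.css.force_pseudo_state(node_id, []))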
+ + + +
+[docs] +def get_background_colors( + node_id: dom.NodeId, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + typing.Optional[typing.List[str]], typing.Optional[str], typing.Optional[str] + ], +]: + """ + :param node_id: Id of the node to get background colors for. + :returns: A tuple with the following items: + + 0. **backgroundColors** - *(Optional)* The range of background colors behind this element, if it contains any visible text. If no visible text is present, this will be undefined. In the case of a flat background color, this will consist of simply that color. In the case of a gradient, this will consist of each of the color stops. For anything more complicated, this will be an empty array. Images will be ignored (as if the image had failed to load). + 1. **computedFontSize** - *(Optional)* The computed font size for this node, as a CSS computed value string (e.g. '12px'). + 2. **computedFontWeight** - *(Optional)* The computed font weight for this node, as a CSS computed value string (e.g. 'normal' or '100'). + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.getBackgroundColors", + "params": params, + } + json = yield cmd_dict + return ( + ( + [str(i) for i in json["backgroundColors"]] + if json.get("backgroundColors", None) is not None + else None + ), + ( + str(json["computedFontSize"]) + if json.get("computedFontSize", None) is not None + else None + ), + ( + str(json["computedFontWeight"]) + if json.get("computedFontWeight", None) is not None + else None + ), + )
+ + + +
+[docs] +def get_computed_style_for_node( + node_id: dom.NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[CSSComputedStyleProperty]]: + """ + Returns the computed style for a DOM node identified by ``nodeId``. + + :param node_id: + :returns: Computed style for the specified DOM node. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.getComputedStyleForNode", + "params": params, + } + json = yield cmd_dict + return [CSSComputedStyleProperty.from_json(i) for i in json["computedStyle"]]
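A minimal sketch of turning the returned list into a lookup table; ``CSSComputedStyleProperty`` carries ``name``/``value`` pairs. ``tab.send()`` and ``node_id`` are assumed as in the earlier sketches.

from nodriver import cdp

async def computed_style(tab, node_id) -> dict:
    props = await tab.send(cdp.css.get_computed_style_for_node(node_id))
    # index the flat property list by name for easy lookups such as style["display"]
    return {p.name: p.value for p in props}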
+ + + +
+[docs] +def get_inline_styles_for_node( + node_id: dom.NodeId, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[typing.Optional[CSSStyle], typing.Optional[CSSStyle]], +]: + """ + Returns the styles defined inline (explicitly in the "style" attribute and implicitly, using DOM + attributes) for a DOM node identified by ``nodeId``. + + :param node_id: + :returns: A tuple with the following items: + + 0. **inlineStyle** - *(Optional)* Inline style for the specified DOM node. + 1. **attributesStyle** - *(Optional)* Attribute-defined element style (e.g. resulting from "width=20 height=100%"). + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.getInlineStylesForNode", + "params": params, + } + json = yield cmd_dict + return ( + ( + CSSStyle.from_json(json["inlineStyle"]) + if json.get("inlineStyle", None) is not None + else None + ), + ( + CSSStyle.from_json(json["attributesStyle"]) + if json.get("attributesStyle", None) is not None + else None + ), + )
+ + + +
+[docs] +def get_matched_styles_for_node( + node_id: dom.NodeId, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + typing.Optional[CSSStyle], + typing.Optional[CSSStyle], + typing.Optional[typing.List[RuleMatch]], + typing.Optional[typing.List[PseudoElementMatches]], + typing.Optional[typing.List[InheritedStyleEntry]], + typing.Optional[typing.List[InheritedPseudoElementMatches]], + typing.Optional[typing.List[CSSKeyframesRule]], + typing.Optional[typing.List[CSSPositionFallbackRule]], + typing.Optional[typing.List[CSSPositionTryRule]], + typing.Optional[typing.List[CSSPropertyRule]], + typing.Optional[typing.List[CSSPropertyRegistration]], + typing.Optional[CSSFontPaletteValuesRule], + typing.Optional[dom.NodeId], + ], +]: + """ + Returns requested styles for a DOM node identified by ``nodeId``. + + :param node_id: + :returns: A tuple with the following items: + + 0. **inlineStyle** - *(Optional)* Inline style for the specified DOM node. + 1. **attributesStyle** - *(Optional)* Attribute-defined element style (e.g. resulting from "width=20 height=100%"). + 2. **matchedCSSRules** - *(Optional)* CSS rules matching this node, from all applicable stylesheets. + 3. **pseudoElements** - *(Optional)* Pseudo style matches for this node. + 4. **inherited** - *(Optional)* A chain of inherited styles (from the immediate node parent up to the DOM tree root). + 5. **inheritedPseudoElements** - *(Optional)* A chain of inherited pseudo element styles (from the immediate node parent up to the DOM tree root). + 6. **cssKeyframesRules** - *(Optional)* A list of CSS keyframed animations matching this node. + 7. **cssPositionFallbackRules** - *(Optional)* A list of CSS position fallbacks matching this node. + 8. **cssPositionTryRules** - *(Optional)* A list of CSS @position-try rules matching this node, based on the position-try-options property. + 9. **cssPropertyRules** - *(Optional)* A list of CSS at-property rules matching this node. + 10. **cssPropertyRegistrations** - *(Optional)* A list of CSS property registrations matching this node. + 11. **cssFontPaletteValuesRule** - *(Optional)* A font-palette-values rule matching this node. + 12. **parentLayoutNodeId** - *(Optional)* Id of the first parent element that does not have display: contents. 
+ """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.getMatchedStylesForNode", + "params": params, + } + json = yield cmd_dict + return ( + ( + CSSStyle.from_json(json["inlineStyle"]) + if json.get("inlineStyle", None) is not None + else None + ), + ( + CSSStyle.from_json(json["attributesStyle"]) + if json.get("attributesStyle", None) is not None + else None + ), + ( + [RuleMatch.from_json(i) for i in json["matchedCSSRules"]] + if json.get("matchedCSSRules", None) is not None + else None + ), + ( + [PseudoElementMatches.from_json(i) for i in json["pseudoElements"]] + if json.get("pseudoElements", None) is not None + else None + ), + ( + [InheritedStyleEntry.from_json(i) for i in json["inherited"]] + if json.get("inherited", None) is not None + else None + ), + ( + [ + InheritedPseudoElementMatches.from_json(i) + for i in json["inheritedPseudoElements"] + ] + if json.get("inheritedPseudoElements", None) is not None + else None + ), + ( + [CSSKeyframesRule.from_json(i) for i in json["cssKeyframesRules"]] + if json.get("cssKeyframesRules", None) is not None + else None + ), + ( + [ + CSSPositionFallbackRule.from_json(i) + for i in json["cssPositionFallbackRules"] + ] + if json.get("cssPositionFallbackRules", None) is not None + else None + ), + ( + [CSSPositionTryRule.from_json(i) for i in json["cssPositionTryRules"]] + if json.get("cssPositionTryRules", None) is not None + else None + ), + ( + [CSSPropertyRule.from_json(i) for i in json["cssPropertyRules"]] + if json.get("cssPropertyRules", None) is not None + else None + ), + ( + [ + CSSPropertyRegistration.from_json(i) + for i in json["cssPropertyRegistrations"] + ] + if json.get("cssPropertyRegistrations", None) is not None + else None + ), + ( + CSSFontPaletteValuesRule.from_json(json["cssFontPaletteValuesRule"]) + if json.get("cssFontPaletteValuesRule", None) is not None + else None + ), + ( + dom.NodeId.from_json(json["parentLayoutNodeId"]) + if json.get("parentLayoutNodeId", None) is not None + else None + ), + )
+ + + +
+[docs] +def get_media_queries() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[CSSMedia]] +): + """ + Returns all media queries parsed by the rendering engine. + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "CSS.getMediaQueries", + } + json = yield cmd_dict + return [CSSMedia.from_json(i) for i in json["medias"]]
+ + + +
+[docs] +def get_platform_fonts_for_node( + node_id: dom.NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[PlatformFontUsage]]: + """ + Requests information about platform fonts which we used to render child TextNodes in the given + node. + + :param node_id: + :returns: Usage statistics for every employed platform font. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.getPlatformFontsForNode", + "params": params, + } + json = yield cmd_dict + return [PlatformFontUsage.from_json(i) for i in json["fonts"]]
+ + + +
+[docs] +def get_style_sheet_text( + style_sheet_id: StyleSheetId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Returns the current textual content for a stylesheet. + + :param style_sheet_id: + :returns: The stylesheet text. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.getStyleSheetText", + "params": params, + } + json = yield cmd_dict + return str(json["text"])
+ + + +
+[docs] +def get_layers_for_node( + node_id: dom.NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, CSSLayerData]: + """ + Returns all layers parsed by the rendering engine for the tree scope of a node. + Given a DOM element identified by nodeId, getLayersForNode returns the root + layer for the nearest ancestor document or shadow root. The layer root contains + the full layer tree for the tree scope and their ordering. + + **EXPERIMENTAL** + + :param node_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "CSS.getLayersForNode", + "params": params, + } + json = yield cmd_dict + return CSSLayerData.from_json(json["rootLayer"])
+ + + +
+[docs] +def get_location_for_selector( + style_sheet_id: StyleSheetId, selector_text: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[SourceRange]]: + """ + Given a CSS selector text and a style sheet ID, getLocationForSelector + returns an array of locations of the CSS selector in the style sheet. + + **EXPERIMENTAL** + + :param style_sheet_id: + :param selector_text: + :returns: + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["selectorText"] = selector_text + cmd_dict: T_JSON_DICT = { + "method": "CSS.getLocationForSelector", + "params": params, + } + json = yield cmd_dict + return [SourceRange.from_json(i) for i in json["ranges"]]
+ + + +
+[docs] +def track_computed_style_updates( + properties_to_track: typing.List[CSSComputedStyleProperty], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Starts tracking the given computed styles for updates. The specified array of properties + replaces the one previously specified. Pass empty array to disable tracking. + Use takeComputedStyleUpdates to retrieve the list of nodes that had properties modified. + The changes to computed style properties are only tracked for nodes pushed to the front-end + by the DOM agent. If no changes to the tracked properties occur after the node has been pushed + to the front-end, no updates will be issued for the node. + + **EXPERIMENTAL** + + :param properties_to_track: + """ + params: T_JSON_DICT = dict() + params["propertiesToTrack"] = [i.to_json() for i in properties_to_track] + cmd_dict: T_JSON_DICT = { + "method": "CSS.trackComputedStyleUpdates", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def take_computed_style_updates() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[dom.NodeId]] +): + """ + Polls the next batch of computed style updates. + + **EXPERIMENTAL** + + :returns: The list of node Ids that have their tracked computed styles updated. + """ + cmd_dict: T_JSON_DICT = { + "method": "CSS.takeComputedStyleUpdates", + } + json = yield cmd_dict + return [dom.NodeId.from_json(i) for i in json["nodeIds"]]
+ + + +
+[docs] +def set_effective_property_value_for_node( + node_id: dom.NodeId, property_name: str, value: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Find a rule with the given active property for the given node and set the new value for this + property + + :param node_id: The element id for which to set property. + :param property_name: + :param value: + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["propertyName"] = property_name + params["value"] = value + cmd_dict: T_JSON_DICT = { + "method": "CSS.setEffectivePropertyValueForNode", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_property_rule_property_name( + style_sheet_id: StyleSheetId, range_: SourceRange, property_name: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, Value]: + """ + Modifies the property rule property name. + + :param style_sheet_id: + :param range_: + :param property_name: + :returns: The resulting property name after modification. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["range"] = range_.to_json() + params["propertyName"] = property_name + cmd_dict: T_JSON_DICT = { + "method": "CSS.setPropertyRulePropertyName", + "params": params, + } + json = yield cmd_dict + return Value.from_json(json["propertyName"])
+ + + +
+[docs] +def set_keyframe_key( + style_sheet_id: StyleSheetId, range_: SourceRange, key_text: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, Value]: + """ + Modifies the keyframe rule key text. + + :param style_sheet_id: + :param range_: + :param key_text: + :returns: The resulting key text after modification. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["range"] = range_.to_json() + params["keyText"] = key_text + cmd_dict: T_JSON_DICT = { + "method": "CSS.setKeyframeKey", + "params": params, + } + json = yield cmd_dict + return Value.from_json(json["keyText"])
+ + + +
+[docs] +def set_media_text( + style_sheet_id: StyleSheetId, range_: SourceRange, text: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, CSSMedia]: + """ + Modifies the media rule text. + + :param style_sheet_id: + :param range_: + :param text: + :returns: The resulting CSS media rule after modification. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["range"] = range_.to_json() + params["text"] = text + cmd_dict: T_JSON_DICT = { + "method": "CSS.setMediaText", + "params": params, + } + json = yield cmd_dict + return CSSMedia.from_json(json["media"])
+ + + +
+[docs] +def set_container_query_text( + style_sheet_id: StyleSheetId, range_: SourceRange, text: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, CSSContainerQuery]: + """ + Modifies the expression of a container query. + + **EXPERIMENTAL** + + :param style_sheet_id: + :param range_: + :param text: + :returns: The resulting CSS container query rule after modification. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["range"] = range_.to_json() + params["text"] = text + cmd_dict: T_JSON_DICT = { + "method": "CSS.setContainerQueryText", + "params": params, + } + json = yield cmd_dict + return CSSContainerQuery.from_json(json["containerQuery"])
+ + + +
+[docs] +def set_supports_text( + style_sheet_id: StyleSheetId, range_: SourceRange, text: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, CSSSupports]: + """ + Modifies the expression of a supports at-rule. + + **EXPERIMENTAL** + + :param style_sheet_id: + :param range_: + :param text: + :returns: The resulting CSS Supports rule after modification. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["range"] = range_.to_json() + params["text"] = text + cmd_dict: T_JSON_DICT = { + "method": "CSS.setSupportsText", + "params": params, + } + json = yield cmd_dict + return CSSSupports.from_json(json["supports"])
+ + + +
+[docs] +def set_scope_text( + style_sheet_id: StyleSheetId, range_: SourceRange, text: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, CSSScope]: + """ + Modifies the expression of a scope at-rule. + + **EXPERIMENTAL** + + :param style_sheet_id: + :param range_: + :param text: + :returns: The resulting CSS Scope rule after modification. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["range"] = range_.to_json() + params["text"] = text + cmd_dict: T_JSON_DICT = { + "method": "CSS.setScopeText", + "params": params, + } + json = yield cmd_dict + return CSSScope.from_json(json["scope"])
+ + + +
+[docs] +def set_rule_selector( + style_sheet_id: StyleSheetId, range_: SourceRange, selector: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SelectorList]: + """ + Modifies the rule selector. + + :param style_sheet_id: + :param range_: + :param selector: + :returns: The resulting selector list after modification. + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["range"] = range_.to_json() + params["selector"] = selector + cmd_dict: T_JSON_DICT = { + "method": "CSS.setRuleSelector", + "params": params, + } + json = yield cmd_dict + return SelectorList.from_json(json["selectorList"])
+ + + +
+[docs] +def set_style_sheet_text( + style_sheet_id: StyleSheetId, text: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Optional[str]]: + """ + Sets the new stylesheet text. + + :param style_sheet_id: + :param text: + :returns: *(Optional)* URL of source map associated with script (if any). + """ + params: T_JSON_DICT = dict() + params["styleSheetId"] = style_sheet_id.to_json() + params["text"] = text + cmd_dict: T_JSON_DICT = { + "method": "CSS.setStyleSheetText", + "params": params, + } + json = yield cmd_dict + return ( + str(json["sourceMapURL"]) + if json.get("sourceMapURL", None) is not None + else None + )
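A sketch of a read-modify-write cycle combining ``get_style_sheet_text`` and ``set_style_sheet_text``; ``tab.send()`` and ``sheet_id`` are assumed as before.

from nodriver import cdp

async def replace_in_sheet(tab, sheet_id, old: str, new: str):
    text = await tab.send(cdp.css.get_style_sheet_text(sheet_id))
    source_map_url = await tab.send(
        cdp.css.set_style_sheet_text(sheet_id, text.replace(old, new))
    )
    return source_map_url  # optional URL of an associated source map, if any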
+ + + +
+[docs] +def set_style_texts( + edits: typing.List[StyleDeclarationEdit], + node_for_property_syntax_validation: typing.Optional[dom.NodeId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[CSSStyle]]: + """ + Applies specified style edits one after another in the given order. + + :param edits: + :param node_for_property_syntax_validation: **(EXPERIMENTAL)** *(Optional)* NodeId for the DOM node in whose context custom property declarations for registered properties should be validated. If omitted, declarations in the new rule text can only be validated statically, which may produce incorrect results if the declaration contains a var() for example. + :returns: The resulting styles after modification. + """ + params: T_JSON_DICT = dict() + params["edits"] = [i.to_json() for i in edits] + if node_for_property_syntax_validation is not None: + params["nodeForPropertySyntaxValidation"] = ( + node_for_property_syntax_validation.to_json() + ) + cmd_dict: T_JSON_DICT = { + "method": "CSS.setStyleTexts", + "params": params, + } + json = yield cmd_dict + return [CSSStyle.from_json(i) for i in json["styles"]]
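A sketch of building a ``StyleDeclarationEdit`` from a previously fetched ``CSSStyle`` and applying it. It assumes the style actually lives in a stylesheet, so its optional ``style_sheet_id`` and ``range_`` fields are set (true for styles returned by e.g. ``get_matched_styles_for_node``), plus the usual ``tab.send()`` assumption.

from nodriver import cdp

async def rewrite_declaration(tab, style: cdp.css.CSSStyle, new_text: str):
    # the style's own source range tells setStyleTexts which span of the sheet to replace
    edit = cdp.css.StyleDeclarationEdit(
        style_sheet_id=style.style_sheet_id,
        range_=style.range_,
        text=new_text,
    )
    return await tab.send(cdp.css.set_style_texts([edit]))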
+ + + +
+[docs] +def start_rule_usage_tracking() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables the selector recording. + """ + cmd_dict: T_JSON_DICT = { + "method": "CSS.startRuleUsageTracking", + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_rule_usage_tracking() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[RuleUsage]] +): + """ + Stop tracking rule usage and return the list of rules that were used since last call to + ``takeCoverageDelta`` (or since start of coverage instrumentation). + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "CSS.stopRuleUsageTracking", + } + json = yield cmd_dict + return [RuleUsage.from_json(i) for i in json["ruleUsage"]]
+ + + +
+[docs] +def take_coverage_delta() -> ( + typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[typing.List[RuleUsage], float] + ] +): + """ + Obtain list of rules that became used since last call to this method (or since start of coverage + instrumentation). + + :returns: A tuple with the following items: + + 0. **coverage** - + 1. **timestamp** - Monotonically increasing time, in seconds. + """ + cmd_dict: T_JSON_DICT = { + "method": "CSS.takeCoverageDelta", + } + json = yield cmd_dict + return ( + [RuleUsage.from_json(i) for i in json["coverage"]], + float(json["timestamp"]), + )
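The three coverage commands are meant to be used together; a sketch of one snapshot cycle follows, with ``tab.send()`` assumed as before.

from nodriver import cdp

async def css_coverage_snapshot(tab):
    await tab.send(cdp.css.start_rule_usage_tracking())
    # ... let the page run, or interact with it, here ...
    used_delta, timestamp = await tab.send(cdp.css.take_coverage_delta())
    leftovers = await tab.send(cdp.css.stop_rule_usage_tracking())
    # RuleUsage entries carry style_sheet_id, start_offset, end_offset and a `used` flag
    used = [r for r in used_delta + leftovers if r.used]
    return used, timestamp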
+ + + +
+[docs] +def set_local_fonts_enabled( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables/disables rendering of local CSS fonts (enabled by default). + + **EXPERIMENTAL** + + :param enabled: Whether rendering of local fonts is enabled. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "CSS.setLocalFontsEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("CSS.fontsUpdated") +@dataclass +class FontsUpdated: + """ + Fires whenever a web font is updated. A non-empty font parameter indicates a successfully loaded + web font. + """ + + #: The web font that has loaded. + font: typing.Optional[FontFace] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FontsUpdated: + return cls( + font=( + FontFace.from_json(json["font"]) + if json.get("font", None) is not None + else None + ) + )
+ + + +
+[docs] +@event_class("CSS.mediaQueryResultChanged") +@dataclass +class MediaQueryResultChanged: + """ + Fires whenever a MediaQuery result changes (for example, after a browser window has been + resized.) The current implementation considers only viewport-dependent media features. + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> MediaQueryResultChanged: + return cls()
+ + + +
+[docs] +@event_class("CSS.styleSheetAdded") +@dataclass +class StyleSheetAdded: + """ + Fired whenever an active document stylesheet is added. + """ + + #: Added stylesheet metainfo. + header: CSSStyleSheetHeader + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StyleSheetAdded: + return cls(header=CSSStyleSheetHeader.from_json(json["header"]))
+ + + +
+[docs] +@event_class("CSS.styleSheetChanged") +@dataclass +class StyleSheetChanged: + """ + Fired whenever a stylesheet is changed as a result of the client operation. + """ + + style_sheet_id: StyleSheetId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StyleSheetChanged: + return cls(style_sheet_id=StyleSheetId.from_json(json["styleSheetId"]))
+ + + +
+[docs] +@event_class("CSS.styleSheetRemoved") +@dataclass +class StyleSheetRemoved: + """ + Fired whenever an active document stylesheet is removed. + """ + + #: Identifier of the removed stylesheet. + style_sheet_id: StyleSheetId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StyleSheetRemoved: + return cls(style_sheet_id=StyleSheetId.from_json(json["styleSheetId"]))
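The event classes above are plain dataclasses decorated with ``event_class``; a client subscribes to them through its connection layer. A sketch of two handlers follows; the registration calls are shown as comments because ``tab.add_handler()`` is an assumption about nodriver's handler API, not part of this module.

from nodriver import cdp

def on_sheet_added(event: cdp.css.StyleSheetAdded):
    header = event.header
    print("stylesheet added:", header.style_sheet_id, header.source_url)

def on_fonts_updated(event: cdp.css.FontsUpdated):
    if event.font is not None:
        print("web font loaded:", event.font.font_family)

# registration, assuming nodriver exposes tab.add_handler(event_class, callback):
# tab.add_handler(cdp.css.StyleSheetAdded, on_sheet_added)
# tab.add_handler(cdp.css.FontsUpdated, on_fonts_updated)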
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/database.html b/docs/_build/html/_modules/nodriver/cdp/database.html
new file mode 100644
index 0000000..d3ac143
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/database.html
@@ -0,0 +1,504 @@
+ nodriver.cdp.database - nodriver documentation

Source code for nodriver.cdp.database

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Database (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +class DatabaseId(str): + """ + Unique identifier of Database object. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> DatabaseId: + return cls(json) + + def __repr__(self): + return "DatabaseId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class Database: + """ + Database object. + """ + + #: Database ID. + id_: DatabaseId + + #: Database domain. + domain: str + + #: Database name. + name: str + + #: Database version. + version: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_.to_json() + json["domain"] = self.domain + json["name"] = self.name + json["version"] = self.version + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Database: + return cls( + id_=DatabaseId.from_json(json["id"]), + domain=str(json["domain"]), + name=str(json["name"]), + version=str(json["version"]), + )
+ + + +
+[docs] +@dataclass +class Error: + """ + Database error. + """ + + #: Error message. + message: str + + #: Error code. + code: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["message"] = self.message + json["code"] = self.code + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Error: + return cls( + message=str(json["message"]), + code=int(json["code"]), + )
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables database tracking, prevents database events from being sent to the client. + """ + cmd_dict: T_JSON_DICT = { + "method": "Database.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables database tracking, database events will now be delivered to the client. + """ + cmd_dict: T_JSON_DICT = { + "method": "Database.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def execute_sql(database_id: DatabaseId, query: str) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + typing.Optional[typing.List[str]], + typing.Optional[typing.List[typing.Any]], + typing.Optional[Error], + ], +]: + """ + :param database_id: + :param query: + :returns: A tuple with the following items: + + 0. **columnNames** - + 1. **values** - + 2. **sqlError** - + """ + params: T_JSON_DICT = dict() + params["databaseId"] = database_id.to_json() + params["query"] = query + cmd_dict: T_JSON_DICT = { + "method": "Database.executeSQL", + "params": params, + } + json = yield cmd_dict + return ( + ( + [str(i) for i in json["columnNames"]] + if json.get("columnNames", None) is not None + else None + ), + [i for i in json["values"]] if json.get("values", None) is not None else None, + ( + Error.from_json(json["sqlError"]) + if json.get("sqlError", None) is not None + else None + ), + )
+ + + +
+[docs] +def get_database_table_names( + database_id: DatabaseId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]]: + """ + :param database_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["databaseId"] = database_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Database.getDatabaseTableNames", + "params": params, + } + json = yield cmd_dict + return [str(i) for i in json["tableNames"]]
+ + + +
+[docs] +@event_class("Database.addDatabase") +@dataclass +class AddDatabase: + database: Database + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AddDatabase: + return cls(database=Database.from_json(json["database"]))
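Web SQL is deprecated and the whole Database domain is experimental, but for completeness here is a sketch of the call shapes. ``tab.send()`` is assumed as in the CSS sketches, and ``db_id`` would normally be taken from an ``AddDatabase`` event like the one above.

from nodriver import cdp

async def dump_tables(tab, db_id: cdp.database.DatabaseId):
    await tab.send(cdp.database.enable())
    for table in await tab.send(cdp.database.get_database_table_names(db_id)):
        # table names come from the browser itself, so plain interpolation is fine for a sketch
        columns, values, err = await tab.send(
            cdp.database.execute_sql(db_id, f"SELECT * FROM {table} LIMIT 5")
        )
        if err is not None:
            print(table, "->", err.code, err.message)
        else:
            print(table, columns, values)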
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/debugger.html b/docs/_build/html/_modules/nodriver/cdp/debugger.html
new file mode 100644
index 0000000..4a6dc10
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/debugger.html
@@ -0,0 +1,1989 @@
+ nodriver.cdp.debugger - nodriver documentation

Source code for nodriver.cdp.debugger

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Debugger
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import runtime
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +class BreakpointId(str): + """ + Breakpoint identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> BreakpointId: + return cls(json) + + def __repr__(self): + return "BreakpointId({})".format(super().__repr__())
+ + + +
+[docs] +class CallFrameId(str): + """ + Call frame identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> CallFrameId: + return cls(json) + + def __repr__(self): + return "CallFrameId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class Location: + """ + Location in the source code. + """ + + #: Script identifier as reported in the ``Debugger.scriptParsed``. + script_id: runtime.ScriptId + + #: Line number in the script (0-based). + line_number: int + + #: Column number in the script (0-based). + column_number: typing.Optional[int] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["scriptId"] = self.script_id.to_json() + json["lineNumber"] = self.line_number + if self.column_number is not None: + json["columnNumber"] = self.column_number + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Location: + return cls( + script_id=runtime.ScriptId.from_json(json["scriptId"]), + line_number=int(json["lineNumber"]), + column_number=( + int(json["columnNumber"]) + if json.get("columnNumber", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ScriptPosition: + """ + Location in the source code. + """ + + line_number: int + + column_number: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["lineNumber"] = self.line_number + json["columnNumber"] = self.column_number + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScriptPosition: + return cls( + line_number=int(json["lineNumber"]), + column_number=int(json["columnNumber"]), + )
+ + + +
+[docs] +@dataclass +class LocationRange: + """ + Location range within one script. + """ + + script_id: runtime.ScriptId + + start: ScriptPosition + + end: ScriptPosition + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["scriptId"] = self.script_id.to_json() + json["start"] = self.start.to_json() + json["end"] = self.end.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LocationRange: + return cls( + script_id=runtime.ScriptId.from_json(json["scriptId"]), + start=ScriptPosition.from_json(json["start"]), + end=ScriptPosition.from_json(json["end"]), + )
+ + + +
+[docs] +@dataclass +class CallFrame: + """ + JavaScript call frame. Array of call frames form the call stack. + """ + + #: Call frame identifier. This identifier is only valid while the virtual machine is paused. + call_frame_id: CallFrameId + + #: Name of the JavaScript function called on this call frame. + function_name: str + + #: Location in the source code. + location: Location + + #: JavaScript script name or url. + #: Deprecated in favor of using the ``location.scriptId`` to resolve the URL via a previously + #: sent ``Debugger.scriptParsed`` event. + url: str + + #: Scope chain for this call frame. + scope_chain: typing.List[Scope] + + #: ``this`` object for this call frame. + this: runtime.RemoteObject + + #: Location in the source code. + function_location: typing.Optional[Location] = None + + #: The value being returned, if the function is at return point. + return_value: typing.Optional[runtime.RemoteObject] = None + + #: Valid only while the VM is paused and indicates whether this frame + #: can be restarted or not. Note that a ``true`` value here does not + #: guarantee that Debugger#restartFrame with this CallFrameId will be + #: successful, but it is very likely. + can_be_restarted: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["callFrameId"] = self.call_frame_id.to_json() + json["functionName"] = self.function_name + json["location"] = self.location.to_json() + json["url"] = self.url + json["scopeChain"] = [i.to_json() for i in self.scope_chain] + json["this"] = self.this.to_json() + if self.function_location is not None: + json["functionLocation"] = self.function_location.to_json() + if self.return_value is not None: + json["returnValue"] = self.return_value.to_json() + if self.can_be_restarted is not None: + json["canBeRestarted"] = self.can_be_restarted + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CallFrame: + return cls( + call_frame_id=CallFrameId.from_json(json["callFrameId"]), + function_name=str(json["functionName"]), + location=Location.from_json(json["location"]), + url=str(json["url"]), + scope_chain=[Scope.from_json(i) for i in json["scopeChain"]], + this=runtime.RemoteObject.from_json(json["this"]), + function_location=( + Location.from_json(json["functionLocation"]) + if json.get("functionLocation", None) is not None + else None + ), + return_value=( + runtime.RemoteObject.from_json(json["returnValue"]) + if json.get("returnValue", None) is not None + else None + ), + can_be_restarted=( + bool(json["canBeRestarted"]) + if json.get("canBeRestarted", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class Scope: + """ + Scope description. + """ + + #: Scope type. + type_: str + + #: Object representing the scope. For ``global`` and ``with`` scopes it represents the actual + #: object; for the rest of the scopes, it is artificial transient object enumerating scope + #: variables as its properties. + object_: runtime.RemoteObject + + name: typing.Optional[str] = None + + #: Location in the source code where scope starts + start_location: typing.Optional[Location] = None + + #: Location in the source code where scope ends + end_location: typing.Optional[Location] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + json["object"] = self.object_.to_json() + if self.name is not None: + json["name"] = self.name + if self.start_location is not None: + json["startLocation"] = self.start_location.to_json() + if self.end_location is not None: + json["endLocation"] = self.end_location.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Scope: + return cls( + type_=str(json["type"]), + object_=runtime.RemoteObject.from_json(json["object"]), + name=str(json["name"]) if json.get("name", None) is not None else None, + start_location=( + Location.from_json(json["startLocation"]) + if json.get("startLocation", None) is not None + else None + ), + end_location=( + Location.from_json(json["endLocation"]) + if json.get("endLocation", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class SearchMatch: + """ + Search match for resource. + """ + + #: Line number in resource content. + line_number: float + + #: Line with match content. + line_content: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["lineNumber"] = self.line_number + json["lineContent"] = self.line_content + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SearchMatch: + return cls( + line_number=float(json["lineNumber"]), + line_content=str(json["lineContent"]), + )
+ + + +
+[docs] +@dataclass +class BreakLocation: + #: Script identifier as reported in the ``Debugger.scriptParsed``. + script_id: runtime.ScriptId + + #: Line number in the script (0-based). + line_number: int + + #: Column number in the script (0-based). + column_number: typing.Optional[int] = None + + type_: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["scriptId"] = self.script_id.to_json() + json["lineNumber"] = self.line_number + if self.column_number is not None: + json["columnNumber"] = self.column_number + if self.type_ is not None: + json["type"] = self.type_ + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BreakLocation: + return cls( + script_id=runtime.ScriptId.from_json(json["scriptId"]), + line_number=int(json["lineNumber"]), + column_number=( + int(json["columnNumber"]) + if json.get("columnNumber", None) is not None + else None + ), + type_=str(json["type"]) if json.get("type", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class WasmDisassemblyChunk: + #: The next chunk of disassembled lines. + lines: typing.List[str] + + #: The bytecode offsets describing the start of each line. + bytecode_offsets: typing.List[int] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["lines"] = [i for i in self.lines] + json["bytecodeOffsets"] = [i for i in self.bytecode_offsets] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WasmDisassemblyChunk: + return cls( + lines=[str(i) for i in json["lines"]], + bytecode_offsets=[int(i) for i in json["bytecodeOffsets"]], + )
+ + + +
+[docs] +class ScriptLanguage(enum.Enum): + """ + Enum of possible script languages. + """ + + JAVA_SCRIPT = "JavaScript" + WEB_ASSEMBLY = "WebAssembly" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ScriptLanguage: + return cls(json)
+ + + +
+[docs] +@dataclass +class DebugSymbols: + """ + Debug symbols available for a wasm script. + """ + + #: Type of the debug symbols. + type_: str + + #: URL of the external symbol source. + external_url: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + if self.external_url is not None: + json["externalURL"] = self.external_url + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DebugSymbols: + return cls( + type_=str(json["type"]), + external_url=( + str(json["externalURL"]) + if json.get("externalURL", None) is not None + else None + ), + )
+ + + +
+[docs] +def continue_to_location( + location: Location, target_call_frames: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Continues execution until specific location is reached. + + :param location: Location to continue to. + :param target_call_frames: *(Optional)* + """ + params: T_JSON_DICT = dict() + params["location"] = location.to_json() + if target_call_frames is not None: + params["targetCallFrames"] = target_call_frames + cmd_dict: T_JSON_DICT = { + "method": "Debugger.continueToLocation", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables debugger for given page. + """ + cmd_dict: T_JSON_DICT = { + "method": "Debugger.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable( + max_scripts_cache_size: typing.Optional[float] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, runtime.UniqueDebuggerId]: + """ + Enables debugger for the given page. Clients should not assume that the debugging has been + enabled until the result for this command is received. + + :param max_scripts_cache_size: **(EXPERIMENTAL)** *(Optional)* The maximum size in bytes of collected scripts (not referenced by other heap objects) the debugger can hold. Puts no limit if parameter is omitted. + :returns: Unique identifier of the debugger. + """ + params: T_JSON_DICT = dict() + if max_scripts_cache_size is not None: + params["maxScriptsCacheSize"] = max_scripts_cache_size + cmd_dict: T_JSON_DICT = { + "method": "Debugger.enable", + "params": params, + } + json = yield cmd_dict + return runtime.UniqueDebuggerId.from_json(json["debuggerId"])
+ + + +
+[docs] +def evaluate_on_call_frame( + call_frame_id: CallFrameId, + expression: str, + object_group: typing.Optional[str] = None, + include_command_line_api: typing.Optional[bool] = None, + silent: typing.Optional[bool] = None, + return_by_value: typing.Optional[bool] = None, + generate_preview: typing.Optional[bool] = None, + throw_on_side_effect: typing.Optional[bool] = None, + timeout: typing.Optional[runtime.TimeDelta] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[runtime.RemoteObject, typing.Optional[runtime.ExceptionDetails]], +]: + """ + Evaluates expression on a given call frame. + + :param call_frame_id: Call frame identifier to evaluate on. + :param expression: Expression to evaluate. + :param object_group: *(Optional)* String object group name to put result into (allows rapid releasing resulting object handles using ```releaseObjectGroup````). + :param include_command_line_api: *(Optional)* Specifies whether command line API should be available to the evaluated expression, defaults to false. + :param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ````setPauseOnException``` state. + :param return_by_value: *(Optional)* Whether the result is expected to be a JSON object that should be sent by value. + :param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the result. + :param throw_on_side_effect: *(Optional)* Whether to throw an exception if side effect cannot be ruled out during evaluation. + :param timeout: **(EXPERIMENTAL)** *(Optional)* Terminate execution after timing out (number of milliseconds). + :returns: A tuple with the following items: + + 0. **result** - Object wrapper for the evaluation result. + 1. **exceptionDetails** - *(Optional)* Exception details. + """ + params: T_JSON_DICT = dict() + params["callFrameId"] = call_frame_id.to_json() + params["expression"] = expression + if object_group is not None: + params["objectGroup"] = object_group + if include_command_line_api is not None: + params["includeCommandLineAPI"] = include_command_line_api + if silent is not None: + params["silent"] = silent + if return_by_value is not None: + params["returnByValue"] = return_by_value + if generate_preview is not None: + params["generatePreview"] = generate_preview + if throw_on_side_effect is not None: + params["throwOnSideEffect"] = throw_on_side_effect + if timeout is not None: + params["timeout"] = timeout.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.evaluateOnCallFrame", + "params": params, + } + json = yield cmd_dict + return ( + runtime.RemoteObject.from_json(json["result"]), + ( + runtime.ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + ), + )
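A sketch of evaluating an expression in a paused frame. It assumes execution is already paused and that ``frame`` is a ``CallFrame`` taken from the ``Debugger.paused`` event, plus the usual ``tab.send()`` assumption.

from nodriver import cdp

async def peek(tab, frame: cdp.debugger.CallFrame, expression: str):
    result, exc = await tab.send(cdp.debugger.evaluate_on_call_frame(
        frame.call_frame_id,
        expression,
        return_by_value=True,   # ask for a plain JSON value instead of an object handle
        silent=True,            # don't pause again if the expression throws
    ))
    if exc is not None:
        return None
    return result.value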
+ + + +
+[docs] +def get_possible_breakpoints( + start: Location, + end: typing.Optional[Location] = None, + restrict_to_function: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[BreakLocation]]: + """ + Returns possible locations for breakpoint. scriptId in start and end range locations should be + the same. + + :param start: Start of range to search possible breakpoint locations in. + :param end: *(Optional)* End of range to search possible breakpoint locations in (excluding). When not specified, end of scripts is used as end of range. + :param restrict_to_function: *(Optional)* Only consider locations which are in the same (non-nested) function as start. + :returns: List of the possible breakpoint locations. + """ + params: T_JSON_DICT = dict() + params["start"] = start.to_json() + if end is not None: + params["end"] = end.to_json() + if restrict_to_function is not None: + params["restrictToFunction"] = restrict_to_function + cmd_dict: T_JSON_DICT = { + "method": "Debugger.getPossibleBreakpoints", + "params": params, + } + json = yield cmd_dict + return [BreakLocation.from_json(i) for i in json["locations"]]
+ + + +
+[docs] +def get_script_source( + script_id: runtime.ScriptId, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[str, typing.Optional[str]] +]: + """ + Returns source for the script with given id. + + :param script_id: Id of the script to get source for. + :returns: A tuple with the following items: + + 0. **scriptSource** - Script source (empty in case of Wasm bytecode). + 1. **bytecode** - *(Optional)* Wasm bytecode. (Encoded as a base64 string when passed over JSON) + """ + params: T_JSON_DICT = dict() + params["scriptId"] = script_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.getScriptSource", + "params": params, + } + json = yield cmd_dict + return ( + str(json["scriptSource"]), + str(json["bytecode"]) if json.get("bytecode", None) is not None else None, + )
+ + + +
+[docs] +def disassemble_wasm_module( + script_id: runtime.ScriptId, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[typing.Optional[str], int, typing.List[int], WasmDisassemblyChunk], +]: + """ + + + **EXPERIMENTAL** + + :param script_id: Id of the script to disassemble + :returns: A tuple with the following items: + + 0. **streamId** - *(Optional)* For large modules, return a stream from which additional chunks of disassembly can be read successively. + 1. **totalNumberOfLines** - The total number of lines in the disassembly text. + 2. **functionBodyOffsets** - The offsets of all function bodies, in the format [start1, end1, start2, end2, ...] where all ends are exclusive. + 3. **chunk** - The first chunk of disassembly. + """ + params: T_JSON_DICT = dict() + params["scriptId"] = script_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.disassembleWasmModule", + "params": params, + } + json = yield cmd_dict + return ( + str(json["streamId"]) if json.get("streamId", None) is not None else None, + int(json["totalNumberOfLines"]), + [int(i) for i in json["functionBodyOffsets"]], + WasmDisassemblyChunk.from_json(json["chunk"]), + )
+ + + +
+[docs] +def next_wasm_disassembly_chunk( + stream_id: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, WasmDisassemblyChunk]: + """ + Disassemble the next chunk of lines for the module corresponding to the + stream. If disassembly is complete, this API will invalidate the streamId + and return an empty chunk. Any subsequent calls for the now invalid stream + will return errors. + + **EXPERIMENTAL** + + :param stream_id: + :returns: The next chunk of disassembly. + """ + params: T_JSON_DICT = dict() + params["streamId"] = stream_id + cmd_dict: T_JSON_DICT = { + "method": "Debugger.nextWasmDisassemblyChunk", + "params": params, + } + json = yield cmd_dict + return WasmDisassemblyChunk.from_json(json["chunk"])
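Because large modules stream their disassembly in chunks, the two commands above are meant to be used in a loop; a sketch follows, assuming ``tab.send()`` as before and a ``script_id`` for a Wasm script.

from nodriver import cdp

async def disassemble_wasm(tab, script_id):
    stream_id, total_lines, body_offsets, chunk = await tab.send(
        cdp.debugger.disassemble_wasm_module(script_id)
    )
    lines = list(chunk.lines)
    # for large modules the rest arrives via the stream; an empty chunk marks the end
    while stream_id is not None:
        chunk = await tab.send(cdp.debugger.next_wasm_disassembly_chunk(stream_id))
        if not chunk.lines:
            break
        lines.extend(chunk.lines)
    return lines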
+ + + +
+[docs] +@deprecated(version="1.3") +def get_wasm_bytecode( + script_id: runtime.ScriptId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + This command is deprecated. Use getScriptSource instead. + + .. deprecated:: 1.3 + + :param script_id: Id of the Wasm script to get source for. + :returns: Script source. (Encoded as a base64 string when passed over JSON) + """ + params: T_JSON_DICT = dict() + params["scriptId"] = script_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.getWasmBytecode", + "params": params, + } + json = yield cmd_dict + return str(json["bytecode"])
+ + + +
+[docs] +def get_stack_trace( + stack_trace_id: runtime.StackTraceId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, runtime.StackTrace]: + """ + Returns stack trace with given ``stackTraceId``. + + **EXPERIMENTAL** + + :param stack_trace_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["stackTraceId"] = stack_trace_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.getStackTrace", + "params": params, + } + json = yield cmd_dict + return runtime.StackTrace.from_json(json["stackTrace"])
+ + + +
+[docs] +def pause() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Stops on the next JavaScript statement. + """ + cmd_dict: T_JSON_DICT = { + "method": "Debugger.pause", + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def pause_on_async_call( + parent_stack_trace_id: runtime.StackTraceId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param parent_stack_trace_id: Debugger will pause when async call with given stack trace is started. + """ + params: T_JSON_DICT = dict() + params["parentStackTraceId"] = parent_stack_trace_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.pauseOnAsyncCall", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def remove_breakpoint( + breakpoint_id: BreakpointId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes JavaScript breakpoint. + + :param breakpoint_id: + """ + params: T_JSON_DICT = dict() + params["breakpointId"] = breakpoint_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.removeBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def restart_frame( + call_frame_id: CallFrameId, mode: typing.Optional[str] = None +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + typing.List[CallFrame], + typing.Optional[runtime.StackTrace], + typing.Optional[runtime.StackTraceId], + ], +]: + """ + Restarts a particular call frame from the beginning. The old, deprecated + behavior of ``restartFrame`` is to stay paused and allow further CDP commands + after a restart was scheduled. This can cause problems with restarting, so + we now continue execution immediately after it has been scheduled until we + reach the beginning of the restarted frame. + + To stay backwards compatible, ``restartFrame`` now expects a ``mode`` + parameter to be present. If the ``mode`` parameter is missing, ``restartFrame`` + errors out. + + The various return values are deprecated and ``callFrames`` is always empty. + Use the call frames from the ``Debugger#paused`` event instead, which fires + once V8 pauses at the beginning of the restarted function. + + :param call_frame_id: Call frame identifier to evaluate on. + :param mode: **(EXPERIMENTAL)** *(Optional)* The ``mode`` parameter must be present and set to 'StepInto', otherwise ``restartFrame`` will error out. + :returns: A tuple with the following items: + + 0. **callFrames** - New stack trace. + 1. **asyncStackTrace** - *(Optional)* Async stack trace, if any. + 2. **asyncStackTraceId** - *(Optional)* Async stack trace, if any. + """ + params: T_JSON_DICT = dict() + params["callFrameId"] = call_frame_id.to_json() + if mode is not None: + params["mode"] = mode + cmd_dict: T_JSON_DICT = { + "method": "Debugger.restartFrame", + "params": params, + } + json = yield cmd_dict + return ( + [CallFrame.from_json(i) for i in json["callFrames"]], + ( + runtime.StackTrace.from_json(json["asyncStackTrace"]) + if json.get("asyncStackTrace", None) is not None + else None + ), + ( + runtime.StackTraceId.from_json(json["asyncStackTraceId"]) + if json.get("asyncStackTraceId", None) is not None + else None + ), + )
+ + + +
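As a rough usage sketch, restart_frame can be driven through a nodriver tab like this; Tab.send and the frame_id argument (taken from a previous Debugger.paused event) are assumptions made for the example, not guarantees of this page.

# Hypothetical helper: restart the given frame and continue until it is
# re-entered. "StepInto" is currently the only accepted mode.
import nodriver
from nodriver import cdp


async def restart_top_frame(tab: nodriver.Tab, frame_id: cdp.debugger.CallFrameId):
    frames, _async_trace, _async_trace_id = await tab.send(
        cdp.debugger.restart_frame(call_frame_id=frame_id, mode="StepInto")
    )
    # frames is always empty nowadays; the real call stack arrives with the
    # next Debugger.paused event.
    return frames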
+[docs] +def resume( + terminate_on_resume: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Resumes JavaScript execution. + + :param terminate_on_resume: *(Optional)* Set to true to terminate execution upon resuming execution. In contrast to Runtime.terminateExecution, this will allow executing further JavaScript (i.e. via evaluation) until execution of the paused code is actually resumed, at which point termination is triggered. If execution is currently not paused, this parameter has no effect. + """ + params: T_JSON_DICT = dict() + if terminate_on_resume is not None: + params["terminateOnResume"] = terminate_on_resume + cmd_dict: T_JSON_DICT = { + "method": "Debugger.resume", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def search_in_content( + script_id: runtime.ScriptId, + query: str, + case_sensitive: typing.Optional[bool] = None, + is_regex: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[SearchMatch]]: + """ + Searches for given string in script content. + + :param script_id: Id of the script to search in. + :param query: String to search for. + :param case_sensitive: *(Optional)* If true, search is case sensitive. + :param is_regex: *(Optional)* If true, treats string parameter as regex. + :returns: List of search matches. + """ + params: T_JSON_DICT = dict() + params["scriptId"] = script_id.to_json() + params["query"] = query + if case_sensitive is not None: + params["caseSensitive"] = case_sensitive + if is_regex is not None: + params["isRegex"] = is_regex + cmd_dict: T_JSON_DICT = { + "method": "Debugger.searchInContent", + "params": params, + } + json = yield cmd_dict + return [SearchMatch.from_json(i) for i in json["result"]]
+ + + +
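A small hedged example of search_in_content: list every line of an already-parsed script that mentions "TODO". The tab and script_id are assumed to come from elsewhere, e.g. a Debugger.scriptParsed event.

# Sketch: print line number and content of every match in a parsed script.
import nodriver
from nodriver import cdp


async def find_todos(tab: nodriver.Tab, script_id: cdp.runtime.ScriptId):
    matches = await tab.send(
        cdp.debugger.search_in_content(script_id, "TODO", case_sensitive=False)
    )
    for match in matches:
        print(int(match.line_number), match.line_content.rstrip())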
+[docs] +def set_async_call_stack_depth( + max_depth: int, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables or disables async call stacks tracking. + + :param max_depth: Maximum depth of async call stacks. Setting to ``0`` will effectively disable collecting async call stacks (default). + """ + params: T_JSON_DICT = dict() + params["maxDepth"] = max_depth + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setAsyncCallStackDepth", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_blackbox_patterns( + patterns: typing.List[str], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in + scripts with url matching one of the patterns. VM will try to leave blackboxed script by + performing 'step in' several times, finally resorting to 'step out' if unsuccessful. + + **EXPERIMENTAL** + + :param patterns: Array of regexps that will be used to check script url for blackbox state. + """ + params: T_JSON_DICT = dict() + params["patterns"] = [i for i in patterns] + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setBlackboxPatterns", + "params": params, + } + json = yield cmd_dict
+ + + +
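For illustration, blackbox patterns might be installed once per tab as below; the regexes are arbitrary examples and Tab.send is assumed as in the other sketches.

# Sketch: keep the stepping commands out of bundled/vendored code.
import nodriver
from nodriver import cdp


async def skip_vendor_code(tab: nodriver.Tab):
    await tab.send(
        cdp.debugger.set_blackbox_patterns([r"/node_modules/", r"vendor\.[^/]*\.js$"])
    )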
+[docs] +def set_blackboxed_ranges( + script_id: runtime.ScriptId, positions: typing.List[ScriptPosition] +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Makes backend skip steps in the script in blackboxed ranges. VM will try to leave blackboxed + scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. + Positions array contains positions where blackbox state is changed. First interval isn't + blackboxed. Array should be sorted. + + **EXPERIMENTAL** + + :param script_id: Id of the script. + :param positions: + """ + params: T_JSON_DICT = dict() + params["scriptId"] = script_id.to_json() + params["positions"] = [i.to_json() for i in positions] + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setBlackboxedRanges", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_breakpoint( + location: Location, condition: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[BreakpointId, Location]]: + """ + Sets JavaScript breakpoint at a given location. + + :param location: Location to set breakpoint in. + :param condition: *(Optional)* Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true. + :returns: A tuple with the following items: + + 0. **breakpointId** - Id of the created breakpoint for further reference. + 1. **actualLocation** - Location this breakpoint resolved into. + """ + params: T_JSON_DICT = dict() + params["location"] = location.to_json() + if condition is not None: + params["condition"] = condition + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setBreakpoint", + "params": params, + } + json = yield cmd_dict + return ( + BreakpointId.from_json(json["breakpointId"]), + Location.from_json(json["actualLocation"]), + )
+ + + +
+[docs] +def set_instrumentation_breakpoint( + instrumentation: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, BreakpointId]: + """ + Sets instrumentation breakpoint. + + :param instrumentation: Instrumentation name. + :returns: Id of the created breakpoint for further reference. + """ + params: T_JSON_DICT = dict() + params["instrumentation"] = instrumentation + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setInstrumentationBreakpoint", + "params": params, + } + json = yield cmd_dict + return BreakpointId.from_json(json["breakpointId"])
+ + + +
+[docs] +def set_breakpoint_by_url( + line_number: int, + url: typing.Optional[str] = None, + url_regex: typing.Optional[str] = None, + script_hash: typing.Optional[str] = None, + column_number: typing.Optional[int] = None, + condition: typing.Optional[str] = None, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[BreakpointId, typing.List[Location]] +]: + """ + Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this + command is issued, all existing parsed scripts will have breakpoints resolved and returned in + ``locations`` property. Further matching script parsing will result in subsequent + ``breakpointResolved`` events issued. This logical breakpoint will survive page reloads. + + :param line_number: Line number to set breakpoint at. + :param url: *(Optional)* URL of the resources to set breakpoint on. + :param url_regex: *(Optional)* Regex pattern for the URLs of the resources to set breakpoints on. Either ``url`` or ``urlRegex`` must be specified. + :param script_hash: *(Optional)* Script hash of the resources to set breakpoint on. + :param column_number: *(Optional)* Offset in the line to set breakpoint at. + :param condition: *(Optional)* Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true. + :returns: A tuple with the following items: + + 0. **breakpointId** - Id of the created breakpoint for further reference. + 1. **locations** - List of the locations this breakpoint resolved into upon addition. + """ + params: T_JSON_DICT = dict() + params["lineNumber"] = line_number + if url is not None: + params["url"] = url + if url_regex is not None: + params["urlRegex"] = url_regex + if script_hash is not None: + params["scriptHash"] = script_hash + if column_number is not None: + params["columnNumber"] = column_number + if condition is not None: + params["condition"] = condition + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setBreakpointByUrl", + "params": params, + } + json = yield cmd_dict + return ( + BreakpointId.from_json(json["breakpointId"]), + [Location.from_json(i) for i in json["locations"]], + )
+ + + +
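A hedged sketch of set_breakpoint_by_url: a conditional breakpoint on line 120 of any script whose URL ends in app.js. It assumes the Debugger domain was already enabled on the tab and that Tab.send dispatches these generators; the URL pattern, line number and condition are made up for the example.

import nodriver
from nodriver import cdp


async def break_in_app_js(tab: nodriver.Tab) -> cdp.debugger.BreakpointId:
    # Only fires when window.user is falsy.
    bp_id, locations = await tab.send(
        cdp.debugger.set_breakpoint_by_url(
            line_number=120,
            url_regex=r"app\.js$",
            condition="!window.user",
        )
    )
    print(f"breakpoint {bp_id} resolved to {len(locations)} location(s) so far")
    return bp_id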
+[docs] +def set_breakpoint_on_function_call( + object_id: runtime.RemoteObjectId, condition: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, BreakpointId]: + """ + Sets JavaScript breakpoint before each call to the given function. + If another function was created from the same source as a given one, + calling it will also trigger the breakpoint. + + **EXPERIMENTAL** + + :param object_id: Function object id. + :param condition: *(Optional)* Expression to use as a breakpoint condition. When specified, debugger will stop on the breakpoint if this expression evaluates to true. + :returns: Id of the created breakpoint for further reference. + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + if condition is not None: + params["condition"] = condition + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setBreakpointOnFunctionCall", + "params": params, + } + json = yield cmd_dict + return BreakpointId.from_json(json["breakpointId"])
+ + + +
+[docs] +def set_breakpoints_active( + active: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Activates / deactivates all breakpoints on the page. + + :param active: New value for breakpoints active state. + """ + params: T_JSON_DICT = dict() + params["active"] = active + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setBreakpointsActive", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_pause_on_exceptions( + state: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions, + caught exceptions, or no exceptions. Initial pause on exceptions state is ``none``. + + :param state: Pause on exceptions mode. + """ + params: T_JSON_DICT = dict() + params["state"] = state + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setPauseOnExceptions", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_return_value( + new_value: runtime.CallArgument, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Changes return value in top frame. Available only at return break position. + + **EXPERIMENTAL** + + :param new_value: New return value. + """ + params: T_JSON_DICT = dict() + params["newValue"] = new_value.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setReturnValue", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_script_source( + script_id: runtime.ScriptId, + script_source: str, + dry_run: typing.Optional[bool] = None, + allow_top_frame_editing: typing.Optional[bool] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + typing.Optional[typing.List[CallFrame]], + typing.Optional[bool], + typing.Optional[runtime.StackTrace], + typing.Optional[runtime.StackTraceId], + str, + typing.Optional[runtime.ExceptionDetails], + ], +]: + """ + Edits JavaScript source live. + + In general, functions that are currently on the stack can not be edited with + a single exception: If the edited function is the top-most stack frame and + that is the only activation of that function on the stack. In this case + the live edit will be successful and a ``Debugger.restartFrame`` for the + top-most function is automatically triggered. + + :param script_id: Id of the script to edit. + :param script_source: New content of the script. + :param dry_run: *(Optional)* If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code. + :param allow_top_frame_editing: **(EXPERIMENTAL)** *(Optional)* If true, then ``scriptSource`` is allowed to change the function on top of the stack as long as the top-most stack frame is the only activation of that function. + :returns: A tuple with the following items: + + 0. **callFrames** - *(Optional)* New stack trace in case editing has happened while VM was stopped. + 1. **stackChanged** - *(Optional)* Whether current call stack was modified after applying the changes. + 2. **asyncStackTrace** - *(Optional)* Async stack trace, if any. + 3. **asyncStackTraceId** - *(Optional)* Async stack trace, if any. + 4. **status** - Whether the operation was successful or not. Only ``Ok`` denotes a successful live edit while the other enum variants denote why the live edit failed. + 5. **exceptionDetails** - *(Optional)* Exception details if any. Only present when ``status`` is ``CompileError``. + """ + params: T_JSON_DICT = dict() + params["scriptId"] = script_id.to_json() + params["scriptSource"] = script_source + if dry_run is not None: + params["dryRun"] = dry_run + if allow_top_frame_editing is not None: + params["allowTopFrameEditing"] = allow_top_frame_editing + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setScriptSource", + "params": params, + } + json = yield cmd_dict + return ( + ( + [CallFrame.from_json(i) for i in json["callFrames"]] + if json.get("callFrames", None) is not None + else None + ), + ( + bool(json["stackChanged"]) + if json.get("stackChanged", None) is not None + else None + ), + ( + runtime.StackTrace.from_json(json["asyncStackTrace"]) + if json.get("asyncStackTrace", None) is not None + else None + ), + ( + runtime.StackTraceId.from_json(json["asyncStackTraceId"]) + if json.get("asyncStackTraceId", None) is not None + else None + ), + str(json["status"]), + ( + runtime.ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + ), + )
+ + + +
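set_script_source pairs naturally with dry_run for a "would this edit compile?" check. The helper below is a sketch under the same Tab.send assumption; only the status element of the returned tuple is inspected.

import nodriver
from nodriver import cdp


async def live_edit_would_work(
    tab: nodriver.Tab, script_id: cdp.runtime.ScriptId, new_source: str
) -> bool:
    # Dry run: nothing is applied, we just look at the reported status.
    (_frames, _stack_changed, _trace, _trace_id, status, exception) = await tab.send(
        cdp.debugger.set_script_source(script_id, new_source, dry_run=True)
    )
    if status != "Ok":
        print("live edit rejected:", status, exception)
    return status == "Ok"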
+[docs] +def set_skip_all_pauses(skip: bool) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc). + + :param skip: New value for skip pauses state. + """ + params: T_JSON_DICT = dict() + params["skip"] = skip + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setSkipAllPauses", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_variable_value( + scope_number: int, + variable_name: str, + new_value: runtime.CallArgument, + call_frame_id: CallFrameId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Changes value of variable in a callframe. Object-based scopes are not supported and must be + mutated manually. + + :param scope_number: 0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually. + :param variable_name: Variable name. + :param new_value: New variable value. + :param call_frame_id: Id of callframe that holds variable. + """ + params: T_JSON_DICT = dict() + params["scopeNumber"] = scope_number + params["variableName"] = variable_name + params["newValue"] = new_value.to_json() + params["callFrameId"] = call_frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Debugger.setVariableValue", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def step_into( + break_on_async_call: typing.Optional[bool] = None, + skip_list: typing.Optional[typing.List[LocationRange]] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Steps into the function call. + + :param break_on_async_call: **(EXPERIMENTAL)** *(Optional)* Debugger will pause on the execution of the first async task which was scheduled before next pause. + :param skip_list: **(EXPERIMENTAL)** *(Optional)* The skipList specifies location ranges that should be skipped on step into. + """ + params: T_JSON_DICT = dict() + if break_on_async_call is not None: + params["breakOnAsyncCall"] = break_on_async_call + if skip_list is not None: + params["skipList"] = [i.to_json() for i in skip_list] + cmd_dict: T_JSON_DICT = { + "method": "Debugger.stepInto", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def step_out() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Steps out of the function call. + """ + cmd_dict: T_JSON_DICT = { + "method": "Debugger.stepOut", + } + json = yield cmd_dict
+ + + +
+[docs] +def step_over( + skip_list: typing.Optional[typing.List[LocationRange]] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Steps over the statement. + + :param skip_list: **(EXPERIMENTAL)** *(Optional)* The skipList specifies location ranges that should be skipped on step over. + """ + params: T_JSON_DICT = dict() + if skip_list is not None: + params["skipList"] = [i.to_json() for i in skip_list] + cmd_dict: T_JSON_DICT = { + "method": "Debugger.stepOver", + "params": params, + } + json = yield cmd_dict
+ + + +
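The stepping commands compose in the obvious way; a minimal sketch (again assuming Tab.send) that steps over a few statements while paused and then lets the page continue:

import nodriver
from nodriver import cdp


async def step_over_three_and_go(tab: nodriver.Tab):
    # Only meaningful while the debugger is paused.
    for _ in range(3):
        await tab.send(cdp.debugger.step_over())
    await tab.send(cdp.debugger.resume())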
+[docs] +@event_class("Debugger.breakpointResolved") +@dataclass +class BreakpointResolved: + """ + Fired when breakpoint is resolved to an actual script and location. + """ + + #: Breakpoint unique identifier. + breakpoint_id: BreakpointId + #: Actual breakpoint location. + location: Location + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BreakpointResolved: + return cls( + breakpoint_id=BreakpointId.from_json(json["breakpointId"]), + location=Location.from_json(json["location"]), + )
+ + + +
+[docs] +@event_class("Debugger.paused") +@dataclass +class Paused: + """ + Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria. + """ + + #: Call stack the virtual machine stopped on. + call_frames: typing.List[CallFrame] + #: Pause reason. + reason: str + #: Object containing break-specific auxiliary properties. + data: typing.Optional[dict] + #: Hit breakpoints IDs + hit_breakpoints: typing.Optional[typing.List[str]] + #: Async stack trace, if any. + async_stack_trace: typing.Optional[runtime.StackTrace] + #: Async stack trace, if any. + async_stack_trace_id: typing.Optional[runtime.StackTraceId] + #: Never present, will be removed. + async_call_stack_trace_id: typing.Optional[runtime.StackTraceId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Paused: + return cls( + call_frames=[CallFrame.from_json(i) for i in json["callFrames"]], + reason=str(json["reason"]), + data=dict(json["data"]) if json.get("data", None) is not None else None, + hit_breakpoints=( + [str(i) for i in json["hitBreakpoints"]] + if json.get("hitBreakpoints", None) is not None + else None + ), + async_stack_trace=( + runtime.StackTrace.from_json(json["asyncStackTrace"]) + if json.get("asyncStackTrace", None) is not None + else None + ), + async_stack_trace_id=( + runtime.StackTraceId.from_json(json["asyncStackTraceId"]) + if json.get("asyncStackTraceId", None) is not None + else None + ), + async_call_stack_trace_id=( + runtime.StackTraceId.from_json(json["asyncCallStackTraceId"]) + if json.get("asyncCallStackTraceId", None) is not None + else None + ), + )
+ + + +
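A sketch of consuming the Paused event. CallFrame.function_name comes from the generated CallFrame type; the add_handler registration is left as a comment because the exact wiring depends on how the tab or connection was created.

from nodriver import cdp


def on_paused(event: cdp.debugger.Paused):
    # Print why and where execution stopped.
    top = event.call_frames[0]
    print("paused:", event.reason, "in", top.function_name or "<anonymous>")
    if event.hit_breakpoints:
        print("  hit:", ", ".join(event.hit_breakpoints))


# elsewhere, after enabling the Debugger domain on the tab:
# tab.add_handler(cdp.debugger.Paused, on_paused)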
+[docs] +@event_class("Debugger.resumed") +@dataclass +class Resumed: + """ + Fired when the virtual machine resumed execution. + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Resumed: + return cls()
+ + + +
+[docs] +@event_class("Debugger.scriptFailedToParse") +@dataclass +class ScriptFailedToParse: + """ + Fired when virtual machine fails to parse the script. + """ + + #: Identifier of the script parsed. + script_id: runtime.ScriptId + #: URL or name of the script parsed (if any). + url: str + #: Line offset of the script within the resource with given URL (for script tags). + start_line: int + #: Column offset of the script within the resource with given URL. + start_column: int + #: Last line of the script. + end_line: int + #: Length of the last line of the script. + end_column: int + #: Specifies script creation context. + execution_context_id: runtime.ExecutionContextId + #: Content hash of the script, SHA-256. + hash_: str + #: Embedder-specific auxiliary data likely matching {isDefault: boolean, type: 'default'``'isolated'``'worker', frameId: string} + execution_context_aux_data: typing.Optional[dict] + #: URL of source map associated with script (if any). + source_map_url: typing.Optional[str] + #: True, if this script has sourceURL. + has_source_url: typing.Optional[bool] + #: True, if this script is ES6 module. + is_module: typing.Optional[bool] + #: This script length. + length: typing.Optional[int] + #: JavaScript top stack frame of where the script parsed event was triggered if available. + stack_trace: typing.Optional[runtime.StackTrace] + #: If the scriptLanguage is WebAssembly, the code section offset in the module. + code_offset: typing.Optional[int] + #: The language of the script. + script_language: typing.Optional[ScriptLanguage] + #: The name the embedder supplied for this script. + embedder_name: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScriptFailedToParse: + return cls( + script_id=runtime.ScriptId.from_json(json["scriptId"]), + url=str(json["url"]), + start_line=int(json["startLine"]), + start_column=int(json["startColumn"]), + end_line=int(json["endLine"]), + end_column=int(json["endColumn"]), + execution_context_id=runtime.ExecutionContextId.from_json( + json["executionContextId"] + ), + hash_=str(json["hash"]), + execution_context_aux_data=( + dict(json["executionContextAuxData"]) + if json.get("executionContextAuxData", None) is not None + else None + ), + source_map_url=( + str(json["sourceMapURL"]) + if json.get("sourceMapURL", None) is not None + else None + ), + has_source_url=( + bool(json["hasSourceURL"]) + if json.get("hasSourceURL", None) is not None + else None + ), + is_module=( + bool(json["isModule"]) + if json.get("isModule", None) is not None + else None + ), + length=( + int(json["length"]) if json.get("length", None) is not None else None + ), + stack_trace=( + runtime.StackTrace.from_json(json["stackTrace"]) + if json.get("stackTrace", None) is not None + else None + ), + code_offset=( + int(json["codeOffset"]) + if json.get("codeOffset", None) is not None + else None + ), + script_language=( + ScriptLanguage.from_json(json["scriptLanguage"]) + if json.get("scriptLanguage", None) is not None + else None + ), + embedder_name=( + str(json["embedderName"]) + if json.get("embedderName", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Debugger.scriptParsed") +@dataclass +class ScriptParsed: + """ + Fired when virtual machine parses script. This event is also fired for all known and uncollected + scripts upon enabling debugger. + """ + + #: Identifier of the script parsed. + script_id: runtime.ScriptId + #: URL or name of the script parsed (if any). + url: str + #: Line offset of the script within the resource with given URL (for script tags). + start_line: int + #: Column offset of the script within the resource with given URL. + start_column: int + #: Last line of the script. + end_line: int + #: Length of the last line of the script. + end_column: int + #: Specifies script creation context. + execution_context_id: runtime.ExecutionContextId + #: Content hash of the script, SHA-256. + hash_: str + #: Embedder-specific auxiliary data likely matching {isDefault: boolean, type: 'default'``'isolated'``'worker', frameId: string} + execution_context_aux_data: typing.Optional[dict] + #: True, if this script is generated as a result of the live edit operation. + is_live_edit: typing.Optional[bool] + #: URL of source map associated with script (if any). + source_map_url: typing.Optional[str] + #: True, if this script has sourceURL. + has_source_url: typing.Optional[bool] + #: True, if this script is ES6 module. + is_module: typing.Optional[bool] + #: This script length. + length: typing.Optional[int] + #: JavaScript top stack frame of where the script parsed event was triggered if available. + stack_trace: typing.Optional[runtime.StackTrace] + #: If the scriptLanguage is WebAssembly, the code section offset in the module. + code_offset: typing.Optional[int] + #: The language of the script. + script_language: typing.Optional[ScriptLanguage] + #: If the scriptLanguage is WebASsembly, the source of debug symbols for the module. + debug_symbols: typing.Optional[DebugSymbols] + #: The name the embedder supplied for this script. 
+ embedder_name: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScriptParsed: + return cls( + script_id=runtime.ScriptId.from_json(json["scriptId"]), + url=str(json["url"]), + start_line=int(json["startLine"]), + start_column=int(json["startColumn"]), + end_line=int(json["endLine"]), + end_column=int(json["endColumn"]), + execution_context_id=runtime.ExecutionContextId.from_json( + json["executionContextId"] + ), + hash_=str(json["hash"]), + execution_context_aux_data=( + dict(json["executionContextAuxData"]) + if json.get("executionContextAuxData", None) is not None + else None + ), + is_live_edit=( + bool(json["isLiveEdit"]) + if json.get("isLiveEdit", None) is not None + else None + ), + source_map_url=( + str(json["sourceMapURL"]) + if json.get("sourceMapURL", None) is not None + else None + ), + has_source_url=( + bool(json["hasSourceURL"]) + if json.get("hasSourceURL", None) is not None + else None + ), + is_module=( + bool(json["isModule"]) + if json.get("isModule", None) is not None + else None + ), + length=( + int(json["length"]) if json.get("length", None) is not None else None + ), + stack_trace=( + runtime.StackTrace.from_json(json["stackTrace"]) + if json.get("stackTrace", None) is not None + else None + ), + code_offset=( + int(json["codeOffset"]) + if json.get("codeOffset", None) is not None + else None + ), + script_language=( + ScriptLanguage.from_json(json["scriptLanguage"]) + if json.get("scriptLanguage", None) is not None + else None + ), + debug_symbols=( + DebugSymbols.from_json(json["debugSymbols"]) + if json.get("debugSymbols", None) is not None + else None + ), + embedder_name=( + str(json["embedderName"]) + if json.get("embedderName", None) is not None + else None + ), + )
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/device_access.html b/docs/_build/html/_modules/nodriver/cdp/device_access.html
new file mode 100644
index 0000000..be5632b
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/device_access.html
@@ -0,0 +1,468 @@
+ nodriver.cdp.device_access - nodriver documentation

Source code for nodriver.cdp.device_access

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: DeviceAccess (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +class RequestId(str): + """ + Device request id. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> RequestId: + return cls(json) + + def __repr__(self): + return "RequestId({})".format(super().__repr__())
+ + + +
+[docs] +class DeviceId(str): + """ + A device id. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> DeviceId: + return cls(json) + + def __repr__(self): + return "DeviceId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class PromptDevice: + """ + Device information displayed in a user prompt to select a device. + """ + + id_: DeviceId + + #: Display name as it appears in a device request user prompt. + name: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_.to_json() + json["name"] = self.name + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PromptDevice: + return cls( + id_=DeviceId.from_json(json["id"]), + name=str(json["name"]), + )
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable events in this domain. + """ + cmd_dict: T_JSON_DICT = { + "method": "DeviceAccess.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disable events in this domain. + """ + cmd_dict: T_JSON_DICT = { + "method": "DeviceAccess.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def select_prompt( + id_: RequestId, device_id: DeviceId +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Select a device in response to a DeviceAccess.deviceRequestPrompted event. + + :param id_: + :param device_id: + """ + params: T_JSON_DICT = dict() + params["id"] = id_.to_json() + params["deviceId"] = device_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DeviceAccess.selectPrompt", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def cancel_prompt(id_: RequestId) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Cancel a prompt in response to a DeviceAccess.deviceRequestPrompted event. + + :param id_: + """ + params: T_JSON_DICT = dict() + params["id"] = id_.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DeviceAccess.cancelPrompt", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("DeviceAccess.deviceRequestPrompted") +@dataclass +class DeviceRequestPrompted: + """ + A device request opened a user prompt to select a device. Respond with the + selectPrompt or cancelPrompt command. + """ + + id_: RequestId + devices: typing.List[PromptDevice] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DeviceRequestPrompted: + return cls( + id_=RequestId.from_json(json["id"]), + devices=[PromptDevice.from_json(i) for i in json["devices"]], + )
+ +
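Putting the DeviceAccess pieces together, a hypothetical handler could auto-answer every device chooser prompt. Tab.send, Tab.add_handler and support for async callbacks are assumptions here, not guarantees of this page; adjust the wiring to your setup.

import nodriver
from nodriver import cdp


async def auto_select_first_device(tab: nodriver.Tab):
    # Pick the first offered device, or cancel if the list is empty.
    async def on_prompt(event: cdp.device_access.DeviceRequestPrompted):
        if event.devices:
            await tab.send(
                cdp.device_access.select_prompt(event.id_, event.devices[0].id_)
            )
        else:
            await tab.send(cdp.device_access.cancel_prompt(event.id_))

    await tab.send(cdp.device_access.enable())
    tab.add_handler(cdp.device_access.DeviceRequestPrompted, on_prompt)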
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/device_orientation.html b/docs/_build/html/_modules/nodriver/cdp/device_orientation.html
new file mode 100644
index 0000000..36cca3f
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/device_orientation.html
@@ -0,0 +1,353 @@
+ nodriver.cdp.device_orientation - nodriver documentation

Source code for nodriver.cdp.device_orientation

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: DeviceOrientation (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +def clear_device_orientation_override() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, None] +): + """ + Clears the overridden Device Orientation. + """ + cmd_dict: T_JSON_DICT = { + "method": "DeviceOrientation.clearDeviceOrientationOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_device_orientation_override( + alpha: float, beta: float, gamma: float +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides the Device Orientation. + + :param alpha: Mock alpha + :param beta: Mock beta + :param gamma: Mock gamma + """ + params: T_JSON_DICT = dict() + params["alpha"] = alpha + params["beta"] = beta + params["gamma"] = gamma + cmd_dict: T_JSON_DICT = { + "method": "DeviceOrientation.setDeviceOrientationOverride", + "params": params, + } + json = yield cmd_dict
+ +
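A short sketch tying the two DeviceOrientation commands together; Tab.send is assumed as in the other examples and the 30 degree beta tilt is arbitrary.

import asyncio

import nodriver
from nodriver import cdp


async def tilt_for_a_while(tab: nodriver.Tab):
    # Pretend the device is tilted 30 degrees around the x-axis.
    await tab.send(
        cdp.device_orientation.set_device_orientation_override(
            alpha=0.0, beta=30.0, gamma=0.0
        )
    )
    await asyncio.sleep(5)  # let the page observe the overridden orientation
    await tab.send(cdp.device_orientation.clear_device_orientation_override())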
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/dom.html b/docs/_build/html/_modules/nodriver/cdp/dom.html
new file mode 100644
index 0000000..5f74222
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/dom.html
@@ -0,0 +1,2706 @@
+ nodriver.cdp.dom - nodriver documentation

Source code for nodriver.cdp.dom

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: DOM
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import page
+from . import runtime
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +class NodeId(int): + """ + Unique DOM node identifier. + """ + + def to_json(self) -> int: + return self + + @classmethod + def from_json(cls, json: int) -> NodeId: + return cls(json) + + def __repr__(self): + return "NodeId({})".format(super().__repr__())
+ + + +
+[docs] +class BackendNodeId(int): + """ + Unique DOM node identifier used to reference a node that may not have been pushed to the + front-end. + """ + + def to_json(self) -> int: + return self + + @classmethod + def from_json(cls, json: int) -> BackendNodeId: + return cls(json) + + def __repr__(self): + return "BackendNodeId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class BackendNode: + """ + Backend node with a friendly name. + """ + + #: ``Node``'s nodeType. + node_type: int + + #: ``Node``'s nodeName. + node_name: str + + backend_node_id: BackendNodeId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["nodeType"] = self.node_type + json["nodeName"] = self.node_name + json["backendNodeId"] = self.backend_node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BackendNode: + return cls( + node_type=int(json["nodeType"]), + node_name=str(json["nodeName"]), + backend_node_id=BackendNodeId.from_json(json["backendNodeId"]), + )
+ + + +
+[docs] +class PseudoType(enum.Enum): + """ + Pseudo element type. + """ + + FIRST_LINE = "first-line" + FIRST_LETTER = "first-letter" + BEFORE = "before" + AFTER = "after" + MARKER = "marker" + BACKDROP = "backdrop" + SELECTION = "selection" + SEARCH_TEXT = "search-text" + TARGET_TEXT = "target-text" + SPELLING_ERROR = "spelling-error" + GRAMMAR_ERROR = "grammar-error" + HIGHLIGHT = "highlight" + FIRST_LINE_INHERITED = "first-line-inherited" + SCROLL_MARKER = "scroll-marker" + SCROLL_MARKER_GROUP = "scroll-marker-group" + SCROLLBAR = "scrollbar" + SCROLLBAR_THUMB = "scrollbar-thumb" + SCROLLBAR_BUTTON = "scrollbar-button" + SCROLLBAR_TRACK = "scrollbar-track" + SCROLLBAR_TRACK_PIECE = "scrollbar-track-piece" + SCROLLBAR_CORNER = "scrollbar-corner" + RESIZER = "resizer" + INPUT_LIST_BUTTON = "input-list-button" + VIEW_TRANSITION = "view-transition" + VIEW_TRANSITION_GROUP = "view-transition-group" + VIEW_TRANSITION_IMAGE_PAIR = "view-transition-image-pair" + VIEW_TRANSITION_OLD = "view-transition-old" + VIEW_TRANSITION_NEW = "view-transition-new" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PseudoType: + return cls(json)
+ + + +
+[docs] +class ShadowRootType(enum.Enum): + """ + Shadow root type. + """ + + USER_AGENT = "user-agent" + OPEN_ = "open" + CLOSED = "closed" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ShadowRootType: + return cls(json)
+ + + +
+[docs] +class CompatibilityMode(enum.Enum): + """ + Document compatibility mode. + """ + + QUIRKS_MODE = "QuirksMode" + LIMITED_QUIRKS_MODE = "LimitedQuirksMode" + NO_QUIRKS_MODE = "NoQuirksMode" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CompatibilityMode: + return cls(json)
+ + + +
+[docs] +class PhysicalAxes(enum.Enum): + """ + ContainerSelector physical axes + """ + + HORIZONTAL = "Horizontal" + VERTICAL = "Vertical" + BOTH = "Both" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PhysicalAxes: + return cls(json)
+ + + +
+[docs] +class LogicalAxes(enum.Enum): + """ + ContainerSelector logical axes + """ + + INLINE = "Inline" + BLOCK = "Block" + BOTH = "Both" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> LogicalAxes: + return cls(json)
+ + + +
+[docs] +class ScrollOrientation(enum.Enum): + """ + Physical scroll orientation + """ + + HORIZONTAL = "horizontal" + VERTICAL = "vertical" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ScrollOrientation: + return cls(json)
+ + + +
+[docs] +@dataclass +class Node: + """ + DOM interaction is implemented in terms of mirror objects that represent the actual DOM nodes. + DOMNode is a base node mirror type. + """ + + #: Node identifier that is passed into the rest of the DOM messages as the ``nodeId``. Backend + #: will only push node with given ``id`` once. It is aware of all requested nodes and will only + #: fire DOM events for nodes known to the client. + node_id: NodeId + + #: The BackendNodeId for this node. + backend_node_id: BackendNodeId + + #: ``Node``'s nodeType. + node_type: int + + #: ``Node``'s nodeName. + node_name: str + + #: ``Node``'s localName. + local_name: str + + #: ``Node``'s nodeValue. + node_value: str + + #: The id of the parent node if any. + parent_id: typing.Optional[NodeId] = None + + #: Child count for ``Container`` nodes. + child_node_count: typing.Optional[int] = None + + #: Child nodes of this node when requested with children. + children: typing.Optional[typing.List[Node]] = None + + #: Attributes of the ``Element`` node in the form of flat array ``[name1, value1, name2, value2]``. + attributes: typing.Optional[typing.List[str]] = None + + #: Document URL that ``Document`` or ``FrameOwner`` node points to. + document_url: typing.Optional[str] = None + + #: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion. + base_url: typing.Optional[str] = None + + #: ``DocumentType``'s publicId. + public_id: typing.Optional[str] = None + + #: ``DocumentType``'s systemId. + system_id: typing.Optional[str] = None + + #: ``DocumentType``'s internalSubset. + internal_subset: typing.Optional[str] = None + + #: ``Document``'s XML version in case of XML documents. + xml_version: typing.Optional[str] = None + + #: ``Attr``'s name. + name: typing.Optional[str] = None + + #: ``Attr``'s value. + value: typing.Optional[str] = None + + #: Pseudo element type for this node. + pseudo_type: typing.Optional[PseudoType] = None + + #: Pseudo element identifier for this node. Only present if there is a + #: valid pseudoType. + pseudo_identifier: typing.Optional[str] = None + + #: Shadow root type. + shadow_root_type: typing.Optional[ShadowRootType] = None + + #: Frame ID for frame owner elements. + frame_id: typing.Optional[page.FrameId] = None + + #: Content document for frame owner elements. + content_document: typing.Optional[Node] = None + + #: Shadow root list for given element host. + shadow_roots: typing.Optional[typing.List[Node]] = None + + #: Content document fragment for template elements. + template_content: typing.Optional[Node] = None + + #: Pseudo elements associated with this node. + pseudo_elements: typing.Optional[typing.List[Node]] = None + + #: Deprecated, as the HTML Imports API has been removed (crbug.com/937746). + #: This property used to return the imported document for the HTMLImport links. + #: The property is always undefined now. + imported_document: typing.Optional[Node] = None + + #: Distributed nodes for given insertion point. + distributed_nodes: typing.Optional[typing.List[BackendNode]] = None + + #: Whether the node is SVG. 
+ is_svg: typing.Optional[bool] = None + + compatibility_mode: typing.Optional[CompatibilityMode] = None + + assigned_slot: typing.Optional[BackendNode] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["nodeId"] = self.node_id.to_json() + json["backendNodeId"] = self.backend_node_id.to_json() + json["nodeType"] = self.node_type + json["nodeName"] = self.node_name + json["localName"] = self.local_name + json["nodeValue"] = self.node_value + if self.parent_id is not None: + json["parentId"] = self.parent_id.to_json() + if self.child_node_count is not None: + json["childNodeCount"] = self.child_node_count + if self.children is not None: + json["children"] = [i.to_json() for i in self.children] + if self.attributes is not None: + json["attributes"] = [i for i in self.attributes] + if self.document_url is not None: + json["documentURL"] = self.document_url + if self.base_url is not None: + json["baseURL"] = self.base_url + if self.public_id is not None: + json["publicId"] = self.public_id + if self.system_id is not None: + json["systemId"] = self.system_id + if self.internal_subset is not None: + json["internalSubset"] = self.internal_subset + if self.xml_version is not None: + json["xmlVersion"] = self.xml_version + if self.name is not None: + json["name"] = self.name + if self.value is not None: + json["value"] = self.value + if self.pseudo_type is not None: + json["pseudoType"] = self.pseudo_type.to_json() + if self.pseudo_identifier is not None: + json["pseudoIdentifier"] = self.pseudo_identifier + if self.shadow_root_type is not None: + json["shadowRootType"] = self.shadow_root_type.to_json() + if self.frame_id is not None: + json["frameId"] = self.frame_id.to_json() + if self.content_document is not None: + json["contentDocument"] = self.content_document.to_json() + if self.shadow_roots is not None: + json["shadowRoots"] = [i.to_json() for i in self.shadow_roots] + if self.template_content is not None: + json["templateContent"] = self.template_content.to_json() + if self.pseudo_elements is not None: + json["pseudoElements"] = [i.to_json() for i in self.pseudo_elements] + if self.imported_document is not None: + json["importedDocument"] = self.imported_document.to_json() + if self.distributed_nodes is not None: + json["distributedNodes"] = [i.to_json() for i in self.distributed_nodes] + if self.is_svg is not None: + json["isSVG"] = self.is_svg + if self.compatibility_mode is not None: + json["compatibilityMode"] = self.compatibility_mode.to_json() + if self.assigned_slot is not None: + json["assignedSlot"] = self.assigned_slot.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Node: + return cls( + node_id=NodeId.from_json(json["nodeId"]), + backend_node_id=BackendNodeId.from_json(json["backendNodeId"]), + node_type=int(json["nodeType"]), + node_name=str(json["nodeName"]), + local_name=str(json["localName"]), + node_value=str(json["nodeValue"]), + parent_id=( + NodeId.from_json(json["parentId"]) + if json.get("parentId", None) is not None + else None + ), + child_node_count=( + int(json["childNodeCount"]) + if json.get("childNodeCount", None) is not None + else None + ), + children=( + [Node.from_json(i) for i in json["children"]] + if json.get("children", None) is not None + else None + ), + attributes=( + [str(i) for i in json["attributes"]] + if json.get("attributes", None) is not None + else None + ), + document_url=( + str(json["documentURL"]) + if json.get("documentURL", None) is not None + else None + ), + base_url=( + 
str(json["baseURL"]) if json.get("baseURL", None) is not None else None + ), + public_id=( + str(json["publicId"]) + if json.get("publicId", None) is not None + else None + ), + system_id=( + str(json["systemId"]) + if json.get("systemId", None) is not None + else None + ), + internal_subset=( + str(json["internalSubset"]) + if json.get("internalSubset", None) is not None + else None + ), + xml_version=( + str(json["xmlVersion"]) + if json.get("xmlVersion", None) is not None + else None + ), + name=str(json["name"]) if json.get("name", None) is not None else None, + value=str(json["value"]) if json.get("value", None) is not None else None, + pseudo_type=( + PseudoType.from_json(json["pseudoType"]) + if json.get("pseudoType", None) is not None + else None + ), + pseudo_identifier=( + str(json["pseudoIdentifier"]) + if json.get("pseudoIdentifier", None) is not None + else None + ), + shadow_root_type=( + ShadowRootType.from_json(json["shadowRootType"]) + if json.get("shadowRootType", None) is not None + else None + ), + frame_id=( + page.FrameId.from_json(json["frameId"]) + if json.get("frameId", None) is not None + else None + ), + content_document=( + Node.from_json(json["contentDocument"]) + if json.get("contentDocument", None) is not None + else None + ), + shadow_roots=( + [Node.from_json(i) for i in json["shadowRoots"]] + if json.get("shadowRoots", None) is not None + else None + ), + template_content=( + Node.from_json(json["templateContent"]) + if json.get("templateContent", None) is not None + else None + ), + pseudo_elements=( + [Node.from_json(i) for i in json["pseudoElements"]] + if json.get("pseudoElements", None) is not None + else None + ), + imported_document=( + Node.from_json(json["importedDocument"]) + if json.get("importedDocument", None) is not None + else None + ), + distributed_nodes=( + [BackendNode.from_json(i) for i in json["distributedNodes"]] + if json.get("distributedNodes", None) is not None + else None + ), + is_svg=bool(json["isSVG"]) if json.get("isSVG", None) is not None else None, + compatibility_mode=( + CompatibilityMode.from_json(json["compatibilityMode"]) + if json.get("compatibilityMode", None) is not None + else None + ), + assigned_slot=( + BackendNode.from_json(json["assignedSlot"]) + if json.get("assignedSlot", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class RGBA: + """ + A structure holding an RGBA color. + """ + + #: The red component, in the [0-255] range. + r: int + + #: The green component, in the [0-255] range. + g: int + + #: The blue component, in the [0-255] range. + b: int + + #: The alpha component, in the [0-1] range (default: 1). + a: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["r"] = self.r + json["g"] = self.g + json["b"] = self.b + if self.a is not None: + json["a"] = self.a + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RGBA: + return cls( + r=int(json["r"]), + g=int(json["g"]), + b=int(json["b"]), + a=float(json["a"]) if json.get("a", None) is not None else None, + )
+ + + +
+[docs] +class Quad(list): + """ + An array of quad vertices, x immediately followed by y for each point, points clock-wise. + """ + + def to_json(self) -> typing.List[float]: + return self + + @classmethod + def from_json(cls, json: typing.List[float]) -> Quad: + return cls(json) + + def __repr__(self): + return "Quad({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class BoxModel: + """ + Box model. + """ + + #: Content box + content: Quad + + #: Padding box + padding: Quad + + #: Border box + border: Quad + + #: Margin box + margin: Quad + + #: Node width + width: int + + #: Node height + height: int + + #: Shape outside coordinates + shape_outside: typing.Optional[ShapeOutsideInfo] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["content"] = self.content.to_json() + json["padding"] = self.padding.to_json() + json["border"] = self.border.to_json() + json["margin"] = self.margin.to_json() + json["width"] = self.width + json["height"] = self.height + if self.shape_outside is not None: + json["shapeOutside"] = self.shape_outside.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BoxModel: + return cls( + content=Quad.from_json(json["content"]), + padding=Quad.from_json(json["padding"]), + border=Quad.from_json(json["border"]), + margin=Quad.from_json(json["margin"]), + width=int(json["width"]), + height=int(json["height"]), + shape_outside=( + ShapeOutsideInfo.from_json(json["shapeOutside"]) + if json.get("shapeOutside", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ShapeOutsideInfo: + """ + CSS Shape Outside details. + """ + + #: Shape bounds + bounds: Quad + + #: Shape coordinate details + shape: typing.List[typing.Any] + + #: Margin shape bounds + margin_shape: typing.List[typing.Any] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["bounds"] = self.bounds.to_json() + json["shape"] = [i for i in self.shape] + json["marginShape"] = [i for i in self.margin_shape] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ShapeOutsideInfo: + return cls( + bounds=Quad.from_json(json["bounds"]), + shape=[i for i in json["shape"]], + margin_shape=[i for i in json["marginShape"]], + )
+ + + +
+[docs] +@dataclass +class Rect: + """ + Rectangle. + """ + + #: X coordinate + x: float + + #: Y coordinate + y: float + + #: Rectangle width + width: float + + #: Rectangle height + height: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["x"] = self.x + json["y"] = self.y + json["width"] = self.width + json["height"] = self.height + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Rect: + return cls( + x=float(json["x"]), + y=float(json["y"]), + width=float(json["width"]), + height=float(json["height"]), + )
+ + + +
+[docs] +@dataclass +class CSSComputedStyleProperty: + #: Computed style property name. + name: str + + #: Computed style property value. + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CSSComputedStyleProperty: + return cls( + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +def collect_class_names_from_subtree( + node_id: NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]]: + """ + Collects class names for the node with given id and all of its child nodes. + + **EXPERIMENTAL** + + :param node_id: Id of the node to collect class names. + :returns: Class name list. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.collectClassNamesFromSubtree", + "params": params, + } + json = yield cmd_dict + return [str(i) for i in json["classNames"]]
+ + + +
+[docs] +def copy_to( + node_id: NodeId, + target_node_id: NodeId, + insert_before_node_id: typing.Optional[NodeId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Creates a deep copy of the specified node and places it into the target container before the + given anchor. + + **EXPERIMENTAL** + + :param node_id: Id of the node to copy. + :param target_node_id: Id of the element to drop the copy into. + :param insert_before_node_id: *(Optional)* Drop the copy before this node (if absent, the copy becomes the last child of ``targetNodeId``). + :returns: Id of the node clone. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["targetNodeId"] = target_node_id.to_json() + if insert_before_node_id is not None: + params["insertBeforeNodeId"] = insert_before_node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.copyTo", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + +
+[docs] +def describe_node( + node_id: typing.Optional[NodeId] = None, + backend_node_id: typing.Optional[BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, + depth: typing.Optional[int] = None, + pierce: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, Node]: + """ + Describes node given its id, does not require domain to be enabled. Does not start tracking any + objects, can be used for automation. + + :param node_id: *(Optional)* Identifier of the node. + :param backend_node_id: *(Optional)* Identifier of the backend node. + :param object_id: *(Optional)* JavaScript object id of the node wrapper. + :param depth: *(Optional)* The maximum depth at which children should be retrieved, defaults to 1. Use -1 for the entire subtree or provide an integer larger than 0. + :param pierce: *(Optional)* Whether or not iframes and shadow roots should be traversed when returning the subtree (default is false). + :returns: Node description. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + if depth is not None: + params["depth"] = depth + if pierce is not None: + params["pierce"] = pierce + cmd_dict: T_JSON_DICT = { + "method": "DOM.describeNode", + "params": params, + } + json = yield cmd_dict + return Node.from_json(json["node"])
+ + + +
+[docs] +def scroll_into_view_if_needed( + node_id: typing.Optional[NodeId] = None, + backend_node_id: typing.Optional[BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, + rect: typing.Optional[Rect] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Scrolls the specified rect of the given node into view if not already visible. + Note: exactly one of nodeId, backendNodeId and objectId should be passed + to identify the node. + + :param node_id: *(Optional)* Identifier of the node. + :param backend_node_id: *(Optional)* Identifier of the backend node. + :param object_id: *(Optional)* JavaScript object id of the node wrapper. + :param rect: *(Optional)* The rect to be scrolled into view, relative to the node's border box, in CSS pixels. When omitted, center of the node will be used, similar to Element.scrollIntoView. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + if rect is not None: + params["rect"] = rect.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.scrollIntoViewIfNeeded", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables DOM agent for the given page. + """ + cmd_dict: T_JSON_DICT = { + "method": "DOM.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def discard_search_results( + search_id: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Discards search results from the session with the given id. ``getSearchResults`` should no longer + be called for that search. + + **EXPERIMENTAL** + + :param search_id: Unique search session identifier. + """ + params: T_JSON_DICT = dict() + params["searchId"] = search_id + cmd_dict: T_JSON_DICT = { + "method": "DOM.discardSearchResults", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def enable( + include_whitespace: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables DOM agent for the given page. + + :param include_whitespace: **(EXPERIMENTAL)** *(Optional)* Whether to include whitespaces in the children array of returned Nodes. + """ + params: T_JSON_DICT = dict() + if include_whitespace is not None: + params["includeWhitespace"] = include_whitespace + cmd_dict: T_JSON_DICT = { + "method": "DOM.enable", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def focus( + node_id: typing.Optional[NodeId] = None, + backend_node_id: typing.Optional[BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Focuses the given element. + + :param node_id: *(Optional)* Identifier of the node. + :param backend_node_id: *(Optional)* Identifier of the backend node. + :param object_id: *(Optional)* JavaScript object id of the node wrapper. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.focus", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_attributes( + node_id: NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]]: + """ + Returns attributes for the specified node. + + :param node_id: Id of the node to retrieve attributes for. + :returns: An interleaved array of node attribute names and values. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getAttributes", + "params": params, + } + json = yield cmd_dict + return [str(i) for i in json["attributes"]]
+ + + +
+[docs] +def get_box_model( + node_id: typing.Optional[NodeId] = None, + backend_node_id: typing.Optional[BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, BoxModel]: + """ + Returns boxes for the given node. + + :param node_id: *(Optional)* Identifier of the node. + :param backend_node_id: *(Optional)* Identifier of the backend node. + :param object_id: *(Optional)* JavaScript object id of the node wrapper. + :returns: Box model for the node. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getBoxModel", + "params": params, + } + json = yield cmd_dict + return BoxModel.from_json(json["model"])
+ + + +
+[docs] +def get_content_quads( + node_id: typing.Optional[NodeId] = None, + backend_node_id: typing.Optional[BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Quad]]: + """ + Returns quads that describe node position on the page. This method + might return multiple quads for inline nodes. + + **EXPERIMENTAL** + + :param node_id: *(Optional)* Identifier of the node. + :param backend_node_id: *(Optional)* Identifier of the backend node. + :param object_id: *(Optional)* JavaScript object id of the node wrapper. + :returns: Quads that describe node layout relative to viewport. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getContentQuads", + "params": params, + } + json = yield cmd_dict + return [Quad.from_json(i) for i in json["quads"]]
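Per the CDP ``Quad`` definition, each quad is a flat list of eight numbers (x/y vertex pairs in clockwise order), so a click point can be derived by averaging the vertices. A sketch, assuming the same ``tab.send()`` driver as above:

import typing

from nodriver import cdp


async def center_of(tab, backend_node_id: cdp.dom.BackendNodeId) -> typing.Tuple[float, float]:
    quads = await tab.send(cdp.dom.get_content_quads(backend_node_id=backend_node_id))
    quad = quads[0]  # inline nodes may produce several quads; take the first
    xs, ys = quad[0::2], quad[1::2]  # vertices are x1, y1, x2, y2, x3, y3, x4, y4
    return sum(xs) / 4, sum(ys) / 4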
+ + + +
+[docs] +def get_document( + depth: typing.Optional[int] = None, pierce: typing.Optional[bool] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, Node]: + """ + Returns the root DOM node (and optionally the subtree) to the caller. + Implicitly enables the DOM domain events for the current target. + + :param depth: *(Optional)* The maximum depth at which children should be retrieved, defaults to 1. Use -1 for the entire subtree or provide an integer larger than 0. + :param pierce: *(Optional)* Whether or not iframes and shadow roots should be traversed when returning the subtree (default is false). + :returns: Resulting node. + """ + params: T_JSON_DICT = dict() + if depth is not None: + params["depth"] = depth + if pierce is not None: + params["pierce"] = pierce + cmd_dict: T_JSON_DICT = { + "method": "DOM.getDocument", + "params": params, + } + json = yield cmd_dict + return Node.from_json(json["root"])
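``get_document`` is the usual entry point: its root ``Node`` carries the ``node_id`` that the query commands further down expect. A minimal sketch (``tab.send()`` assumed as above):

from nodriver import cdp


async def find_node(tab, selector: str) -> cdp.dom.NodeId:
    # depth=1 is enough when only the root's node_id is needed for querying;
    # depth=-1 with pierce=True would pull the whole tree including iframes and shadow roots.
    root = await tab.send(cdp.dom.get_document(depth=1))
    return await tab.send(cdp.dom.query_selector(root.node_id, selector))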
+ + + +
+[docs] +@deprecated(version="1.3") +def get_flattened_document( + depth: typing.Optional[int] = None, pierce: typing.Optional[bool] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Node]]: + """ + Returns the root DOM node (and optionally the subtree) to the caller. + Deprecated, as it is not designed to work well with the rest of the DOM agent. + Use DOMSnapshot.captureSnapshot instead. + + .. deprecated:: 1.3 + + :param depth: *(Optional)* The maximum depth at which children should be retrieved, defaults to 1. Use -1 for the entire subtree or provide an integer larger than 0. + :param pierce: *(Optional)* Whether or not iframes and shadow roots should be traversed when returning the subtree (default is false). + :returns: Resulting node. + """ + params: T_JSON_DICT = dict() + if depth is not None: + params["depth"] = depth + if pierce is not None: + params["pierce"] = pierce + cmd_dict: T_JSON_DICT = { + "method": "DOM.getFlattenedDocument", + "params": params, + } + json = yield cmd_dict + return [Node.from_json(i) for i in json["nodes"]]
+ + + +
+[docs] +def get_nodes_for_subtree_by_style( + node_id: NodeId, + computed_styles: typing.List[CSSComputedStyleProperty], + pierce: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[NodeId]]: + """ + Finds nodes with a given computed style in a subtree. + + **EXPERIMENTAL** + + :param node_id: Node ID pointing to the root of a subtree. + :param computed_styles: The style to filter nodes by (includes nodes if any of properties matches). + :param pierce: *(Optional)* Whether or not iframes and shadow roots in the same target should be traversed when returning the results (default is false). + :returns: Resulting nodes. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["computedStyles"] = [i.to_json() for i in computed_styles] + if pierce is not None: + params["pierce"] = pierce + cmd_dict: T_JSON_DICT = { + "method": "DOM.getNodesForSubtreeByStyle", + "params": params, + } + json = yield cmd_dict + return [NodeId.from_json(i) for i in json["nodeIds"]]
+ + + +
+[docs] +def get_node_for_location( + x: int, + y: int, + include_user_agent_shadow_dom: typing.Optional[bool] = None, + ignore_pointer_events_none: typing.Optional[bool] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[BackendNodeId, page.FrameId, typing.Optional[NodeId]], +]: + """ + Returns node id at given location. Depending on whether DOM domain is enabled, nodeId is + either returned or not. + + :param x: X coordinate. + :param y: Y coordinate. + :param include_user_agent_shadow_dom: *(Optional)* False to skip to the nearest non-UA shadow root ancestor (default: false). + :param ignore_pointer_events_none: *(Optional)* Whether to ignore pointer-events: none on elements and hit test them. + :returns: A tuple with the following items: + + 0. **backendNodeId** - Resulting node. + 1. **frameId** - Frame this node belongs to. + 2. **nodeId** - *(Optional)* Id of the node at given coordinates, only when enabled and requested document. + """ + params: T_JSON_DICT = dict() + params["x"] = x + params["y"] = y + if include_user_agent_shadow_dom is not None: + params["includeUserAgentShadowDOM"] = include_user_agent_shadow_dom + if ignore_pointer_events_none is not None: + params["ignorePointerEventsNone"] = ignore_pointer_events_none + cmd_dict: T_JSON_DICT = { + "method": "DOM.getNodeForLocation", + "params": params, + } + json = yield cmd_dict + return ( + BackendNodeId.from_json(json["backendNodeId"]), + page.FrameId.from_json(json["frameId"]), + ( + NodeId.from_json(json["nodeId"]) + if json.get("nodeId", None) is not None + else None + ), + )
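The command returns a plain tuple, so callers unpack it positionally; ``nodeId`` is ``None`` unless the DOM domain is enabled on the target. A sketch under the same ``tab.send()`` assumption:

import typing

from nodriver import cdp


async def node_under_point(
    tab, x: int, y: int
) -> typing.Tuple[cdp.dom.BackendNodeId, cdp.page.FrameId, typing.Optional[cdp.dom.NodeId]]:
    # node_id is only populated when the DOM domain has been enabled on this target.
    backend_node_id, frame_id, node_id = await tab.send(
        cdp.dom.get_node_for_location(x=x, y=y, include_user_agent_shadow_dom=False)
    )
    return backend_node_id, frame_id, node_id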
+ + + +
+[docs] +def get_outer_html( + node_id: typing.Optional[NodeId] = None, + backend_node_id: typing.Optional[BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Returns node's HTML markup. + + :param node_id: *(Optional)* Identifier of the node. + :param backend_node_id: *(Optional)* Identifier of the backend node. + :param object_id: *(Optional)* JavaScript object id of the node wrapper. + :returns: Outer HTML markup. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getOuterHTML", + "params": params, + } + json = yield cmd_dict + return str(json["outerHTML"])
+ + + +
+[docs] +def get_relayout_boundary( + node_id: NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Returns the id of the nearest ancestor that is a relayout boundary. + + **EXPERIMENTAL** + + :param node_id: Id of the node. + :returns: Relayout boundary node id for the given node. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getRelayoutBoundary", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + +
+[docs] +def get_search_results( + search_id: str, from_index: int, to_index: int +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[NodeId]]: + """ + Returns search results from given ``fromIndex`` to given ``toIndex`` from the search with the given + identifier. + + **EXPERIMENTAL** + + :param search_id: Unique search session identifier. + :param from_index: Start index of the search result to be returned. + :param to_index: End index of the search result to be returned. + :returns: Ids of the search result nodes. + """ + params: T_JSON_DICT = dict() + params["searchId"] = search_id + params["fromIndex"] = from_index + params["toIndex"] = to_index + cmd_dict: T_JSON_DICT = { + "method": "DOM.getSearchResults", + "params": params, + } + json = yield cmd_dict + return [NodeId.from_json(i) for i in json["nodeIds"]]
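The search workflow is three steps: start a search, page through the results, then discard the session. A sketch, assuming this module's ``perform_search`` wrapper (``DOM.performSearch``, defined elsewhere in the file) returns a ``(search_id, result_count)`` tuple and ``tab.send()`` drives the commands:

import typing

from nodriver import cdp


async def search_nodes(tab, query: str) -> typing.List[cdp.dom.NodeId]:
    # perform_search (DOM.performSearch) is assumed to return (search_id, result_count).
    search_id, result_count = await tab.send(cdp.dom.perform_search(query))
    try:
        if result_count == 0:
            return []
        return await tab.send(cdp.dom.get_search_results(search_id, 0, result_count))
    finally:
        # Per the docstring above, the session must not be reused after discarding.
        await tab.send(cdp.dom.discard_search_results(search_id))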
+ + + +
+[docs] +def hide_highlight() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Hides any highlight. + """ + cmd_dict: T_JSON_DICT = { + "method": "DOM.hideHighlight", + } + json = yield cmd_dict
+ + + +
+[docs] +def highlight_node() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlights DOM node. + """ + cmd_dict: T_JSON_DICT = { + "method": "DOM.highlightNode", + } + json = yield cmd_dict
+ + + +
+[docs] +def highlight_rect() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlights given rectangle. + """ + cmd_dict: T_JSON_DICT = { + "method": "DOM.highlightRect", + } + json = yield cmd_dict
+ + + +
+[docs] +def mark_undoable_state() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Marks last undoable state. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "DOM.markUndoableState", + } + json = yield cmd_dict
+ + + +
+[docs] +def move_to( + node_id: NodeId, + target_node_id: NodeId, + insert_before_node_id: typing.Optional[NodeId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Moves node into the new container, places it before the given anchor. + + :param node_id: Id of the node to move. + :param target_node_id: Id of the element to drop the moved node into. + :param insert_before_node_id: *(Optional)* Drop node before this one (if absent, the moved node becomes the last child of ```targetNodeId```). + :returns: New id of the moved node. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["targetNodeId"] = target_node_id.to_json() + if insert_before_node_id is not None: + params["insertBeforeNodeId"] = insert_before_node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.moveTo", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + + + + + +
+[docs] +def push_node_by_path_to_frontend( + path: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Requests that the node is sent to the caller given its path. // FIXME, use XPath + + **EXPERIMENTAL** + + :param path: Path to node in the proprietary format. + :returns: Id of the node for given path. + """ + params: T_JSON_DICT = dict() + params["path"] = path + cmd_dict: T_JSON_DICT = { + "method": "DOM.pushNodeByPathToFrontend", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + +
+[docs] +def push_nodes_by_backend_ids_to_frontend( + backend_node_ids: typing.List[BackendNodeId], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[NodeId]]: + """ + Requests that a batch of nodes is sent to the caller given their backend node ids. + + **EXPERIMENTAL** + + :param backend_node_ids: The array of backend node ids. + :returns: The array of ids of pushed nodes that correspond to the backend ids specified in backendNodeIds. + """ + params: T_JSON_DICT = dict() + params["backendNodeIds"] = [i.to_json() for i in backend_node_ids] + cmd_dict: T_JSON_DICT = { + "method": "DOM.pushNodesByBackendIdsToFrontend", + "params": params, + } + json = yield cmd_dict + return [NodeId.from_json(i) for i in json["nodeIds"]]
+ + + +
+[docs] +def query_selector( + node_id: NodeId, selector: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Executes ``querySelector`` on a given node. + + :param node_id: Id of the node to query upon. + :param selector: Selector string. + :returns: Query selector result. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["selector"] = selector + cmd_dict: T_JSON_DICT = { + "method": "DOM.querySelector", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + +
+[docs] +def query_selector_all( + node_id: NodeId, selector: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[NodeId]]: + """ + Executes ``querySelectorAll`` on a given node. + + :param node_id: Id of the node to query upon. + :param selector: Selector string. + :returns: Query selector result. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["selector"] = selector + cmd_dict: T_JSON_DICT = { + "method": "DOM.querySelectorAll", + "params": params, + } + json = yield cmd_dict + return [NodeId.from_json(i) for i in json["nodeIds"]]
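``query_selector_all`` only hands back ids; fetching markup (or anything else) is a second round trip per node. A sketch combining it with ``get_outer_html`` (``tab.send()`` assumed as above):

import typing

from nodriver import cdp


async def outer_html_of_all(tab, selector: str) -> typing.List[str]:
    root = await tab.send(cdp.dom.get_document())
    node_ids = await tab.send(cdp.dom.query_selector_all(root.node_id, selector))
    return [await tab.send(cdp.dom.get_outer_html(node_id=nid)) for nid in node_ids]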
+ + + +
+[docs] +def get_top_layer_elements() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[NodeId]] +): + """ + Returns NodeIds of current top layer elements. + Top layer is rendered closest to the user within a viewport, therefore its elements always + appear on top of all other content. + + **EXPERIMENTAL** + + :returns: NodeIds of top layer elements + """ + cmd_dict: T_JSON_DICT = { + "method": "DOM.getTopLayerElements", + } + json = yield cmd_dict + return [NodeId.from_json(i) for i in json["nodeIds"]]
+ + + +
+[docs] +def get_element_by_relation( + node_id: NodeId, relation: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Returns the NodeId of the matched element according to certain relations. + + **EXPERIMENTAL** + + :param node_id: Id of the node from which to query the relation. + :param relation: Type of relation to get. + :returns: NodeId of the element matching the queried relation. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["relation"] = relation + cmd_dict: T_JSON_DICT = { + "method": "DOM.getElementByRelation", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + +
+[docs] +def redo() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Re-does the last undone action. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "DOM.redo", + } + json = yield cmd_dict
+ + + +
+[docs] +def remove_attribute( + node_id: NodeId, name: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes attribute with given name from an element with given id. + + :param node_id: Id of the element to remove attribute from. + :param name: Name of the attribute to remove. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["name"] = name + cmd_dict: T_JSON_DICT = { + "method": "DOM.removeAttribute", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def remove_node(node_id: NodeId) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes node with given id. + + :param node_id: Id of the node to remove. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.removeNode", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def request_child_nodes( + node_id: NodeId, + depth: typing.Optional[int] = None, + pierce: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Requests that children of the node with given id are returned to the caller in form of + ``setChildNodes`` events where not only immediate children are retrieved, but all children down to + the specified depth. + + :param node_id: Id of the node to get children for. + :param depth: *(Optional)* The maximum depth at which children should be retrieved, defaults to 1. Use -1 for the entire subtree or provide an integer larger than 0. + :param pierce: *(Optional)* Whether or not iframes and shadow roots should be traversed when returning the sub-tree (default is false). + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + if depth is not None: + params["depth"] = depth + if pierce is not None: + params["pierce"] = pierce + cmd_dict: T_JSON_DICT = { + "method": "DOM.requestChildNodes", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def request_node( + object_id: runtime.RemoteObjectId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Requests that the node is sent to the caller given the JavaScript node object reference. All + nodes that form the path from the node to the root are also sent to the client as a series of + ``setChildNodes`` notifications. + + :param object_id: JavaScript object id to convert into node. + :returns: Node id for given object. + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.requestNode", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + +
+[docs] +def resolve_node( + node_id: typing.Optional[NodeId] = None, + backend_node_id: typing.Optional[BackendNodeId] = None, + object_group: typing.Optional[str] = None, + execution_context_id: typing.Optional[runtime.ExecutionContextId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, runtime.RemoteObject]: + """ + Resolves the JavaScript node object for a given NodeId or BackendNodeId. + + :param node_id: *(Optional)* Id of the node to resolve. + :param backend_node_id: *(Optional)* Backend identifier of the node to resolve. + :param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects. + :param execution_context_id: *(Optional)* Execution context in which to resolve the node. + :returns: JavaScript object wrapper for given node. + """ + params: T_JSON_DICT = dict() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_group is not None: + params["objectGroup"] = object_group + if execution_context_id is not None: + params["executionContextId"] = execution_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.resolveNode", + "params": params, + } + json = yield cmd_dict + return runtime.RemoteObject.from_json(json["object"])
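``resolve_node`` is the bridge from the DOM domain into the Runtime domain: the returned ``RemoteObject`` can be used as the ``this`` binding of a JavaScript call. A sketch, assuming the Runtime wrapper ``call_function_on`` takes the function declaration first and an ``object_id`` keyword (its exact signature lives in the generated ``runtime`` module):

from nodriver import cdp


async def scroll_node_into_view(tab, node_id: cdp.dom.NodeId) -> None:
    # Resolve the DOM node to a JavaScript wrapper, then call a function with it as `this`.
    remote_object = await tab.send(cdp.dom.resolve_node(node_id=node_id))
    await tab.send(
        cdp.runtime.call_function_on(  # assumed signature of the Runtime wrapper
            "function() { this.scrollIntoView({block: 'center'}); }",
            object_id=remote_object.object_id,
        )
    )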
+ + + +
+[docs] +def set_attribute_value( + node_id: NodeId, name: str, value: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets attribute for an element with given id. + + :param node_id: Id of the element to set attribute for. + :param name: Attribute name. + :param value: Attribute value. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["name"] = name + params["value"] = value + cmd_dict: T_JSON_DICT = { + "method": "DOM.setAttributeValue", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_attributes_as_text( + node_id: NodeId, text: str, name: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets attributes on element with given id. This method is useful when user edits some existing + attribute value and types in several attribute name/value pairs. + + :param node_id: Id of the element to set attributes for. + :param text: Text with a number of attributes. Will parse this text using HTML parser. + :param name: *(Optional)* Attribute name to replace with new attributes derived from text in case text parsed successfully. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["text"] = text + if name is not None: + params["name"] = name + cmd_dict: T_JSON_DICT = { + "method": "DOM.setAttributesAsText", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_file_input_files( + files: typing.List[str], + node_id: typing.Optional[NodeId] = None, + backend_node_id: typing.Optional[BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets files for the given file input element. + + :param files: Array of file paths to set. + :param node_id: *(Optional)* Identifier of the node. + :param backend_node_id: *(Optional)* Identifier of the backend node. + :param object_id: *(Optional)* JavaScript object id of the node wrapper. + """ + params: T_JSON_DICT = dict() + params["files"] = [i for i in files] + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.setFileInputFiles", + "params": params, + } + json = yield cmd_dict
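File uploads are done by pointing this command at an ``<input type="file">`` node rather than by synthesizing input events. A sketch (``tab.send()`` assumed as above):

from nodriver import cdp


async def attach_file(tab, selector: str, path: str) -> None:
    root = await tab.send(cdp.dom.get_document())
    input_id = await tab.send(cdp.dom.query_selector(root.node_id, selector))
    # Paths should be absolute and visible to the browser process.
    await tab.send(cdp.dom.set_file_input_files([path], node_id=input_id))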
+ + + +
+[docs] +def set_node_stack_traces_enabled( + enable: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets if stack traces should be captured for Nodes. See ``Node.getNodeStackTraces``. Default is disabled. + + **EXPERIMENTAL** + + :param enable: Enable or disable. + """ + params: T_JSON_DICT = dict() + params["enable"] = enable + cmd_dict: T_JSON_DICT = { + "method": "DOM.setNodeStackTracesEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_node_stack_traces( + node_id: NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Optional[runtime.StackTrace]]: + """ + Gets stack traces associated with a Node. As of now, only provides stack trace for Node creation. + + **EXPERIMENTAL** + + :param node_id: Id of the node to get stack traces for. + :returns: *(Optional)* Creation stack trace, if available. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getNodeStackTraces", + "params": params, + } + json = yield cmd_dict + return ( + runtime.StackTrace.from_json(json["creation"]) + if json.get("creation", None) is not None + else None + )
+ + + +
+[docs] +def get_file_info( + object_id: runtime.RemoteObjectId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Returns file information for the given + File wrapper. + + **EXPERIMENTAL** + + :param object_id: JavaScript object id of the node wrapper. + :returns: + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getFileInfo", + "params": params, + } + json = yield cmd_dict + return str(json["path"])
+ + + +
+[docs] +def set_inspected_node( + node_id: NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables console to refer to the node with given id via $x (see Command Line API for more details + on the $x functions). + + **EXPERIMENTAL** + + :param node_id: DOM node id to be accessible by means of $x command line API. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.setInspectedNode", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_node_name( + node_id: NodeId, name: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Sets node name for a node with given id. + + :param node_id: Id of the node to set name for. + :param name: New node's name. + :returns: New node's id. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["name"] = name + cmd_dict: T_JSON_DICT = { + "method": "DOM.setNodeName", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + +
+[docs] +def set_node_value( + node_id: NodeId, value: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets node value for a node with given id. + + :param node_id: Id of the node to set value for. + :param value: New node's value. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["value"] = value + cmd_dict: T_JSON_DICT = { + "method": "DOM.setNodeValue", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_outer_html( + node_id: NodeId, outer_html: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets node HTML markup, returns new node id. + + :param node_id: Id of the node to set markup for. + :param outer_html: Outer HTML markup to set. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["outerHTML"] = outer_html + cmd_dict: T_JSON_DICT = { + "method": "DOM.setOuterHTML", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def undo() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Undoes the last performed action. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "DOM.undo", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_frame_owner( + frame_id: page.FrameId, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[BackendNodeId, typing.Optional[NodeId]] +]: + """ + Returns iframe node that owns iframe with the given domain. + + **EXPERIMENTAL** + + :param frame_id: + :returns: A tuple with the following items: + + 0. **backendNodeId** - Resulting node. + 1. **nodeId** - *(Optional)* Id of the node at given coordinates, only when enabled and requested document. + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getFrameOwner", + "params": params, + } + json = yield cmd_dict + return ( + BackendNodeId.from_json(json["backendNodeId"]), + ( + NodeId.from_json(json["nodeId"]) + if json.get("nodeId", None) is not None + else None + ), + )
+ + + +
+[docs] +def get_container_for_node( + node_id: NodeId, + container_name: typing.Optional[str] = None, + physical_axes: typing.Optional[PhysicalAxes] = None, + logical_axes: typing.Optional[LogicalAxes] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Optional[NodeId]]: + """ + Returns the query container of the given node based on container query + conditions: containerName, physical, and logical axes. If no axes are + provided, the style container is returned, which is the direct parent or the + closest element with a matching container-name. + + **EXPERIMENTAL** + + :param node_id: + :param container_name: *(Optional)* + :param physical_axes: *(Optional)* + :param logical_axes: *(Optional)* + :returns: *(Optional)* The container node for the given node, or null if not found. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + if container_name is not None: + params["containerName"] = container_name + if physical_axes is not None: + params["physicalAxes"] = physical_axes.to_json() + if logical_axes is not None: + params["logicalAxes"] = logical_axes.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getContainerForNode", + "params": params, + } + json = yield cmd_dict + return ( + NodeId.from_json(json["nodeId"]) + if json.get("nodeId", None) is not None + else None + )
+ + + +
+[docs] +def get_querying_descendants_for_container( + node_id: NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[NodeId]]: + """ + Returns the descendants of a container query container that have + container queries against this container. + + **EXPERIMENTAL** + + :param node_id: Id of the container node to find querying descendants from. + :returns: Descendant nodes with container queries against the given container. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOM.getQueryingDescendantsForContainer", + "params": params, + } + json = yield cmd_dict + return [NodeId.from_json(i) for i in json["nodeIds"]]
+ + + +
+[docs] +def get_anchor_element( + node_id: NodeId, anchor_specifier: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, NodeId]: + """ + Returns the target anchor element of the given anchor query according to + https://www.w3.org/TR/css-anchor-position-1/#target. + + **EXPERIMENTAL** + + :param node_id: Id of the positioned element from which to find the anchor. + :param anchor_specifier: *(Optional)* An optional anchor specifier, as defined in https://www.w3.org/TR/css-anchor-position-1/#anchor-specifier. If not provided, it will return the implicit anchor element for the given positioned element. + :returns: The anchor element of the given anchor query. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + if anchor_specifier is not None: + params["anchorSpecifier"] = anchor_specifier + cmd_dict: T_JSON_DICT = { + "method": "DOM.getAnchorElement", + "params": params, + } + json = yield cmd_dict + return NodeId.from_json(json["nodeId"])
+ + + +
+[docs] +@event_class("DOM.attributeModified") +@dataclass +class AttributeModified: + """ + Fired when ``Element``'s attribute is modified. + """ + + #: Id of the node that has changed. + node_id: NodeId + #: Attribute name. + name: str + #: Attribute value. + value: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributeModified: + return cls( + node_id=NodeId.from_json(json["nodeId"]), + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@event_class("DOM.attributeRemoved") +@dataclass +class AttributeRemoved: + """ + Fired when ``Element``'s attribute is removed. + """ + + #: Id of the node that has changed. + node_id: NodeId + #: A ttribute name. + name: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributeRemoved: + return cls(node_id=NodeId.from_json(json["nodeId"]), name=str(json["name"]))
+ + + +
+[docs] +@event_class("DOM.characterDataModified") +@dataclass +class CharacterDataModified: + """ + Mirrors ``DOMCharacterDataModified`` event. + """ + + #: Id of the node that has changed. + node_id: NodeId + #: New text value. + character_data: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CharacterDataModified: + return cls( + node_id=NodeId.from_json(json["nodeId"]), + character_data=str(json["characterData"]), + )
+ + + +
+[docs] +@event_class("DOM.childNodeCountUpdated") +@dataclass +class ChildNodeCountUpdated: + """ + Fired when ``Container``'s child node count has changed. + """ + + #: Id of the node that has changed. + node_id: NodeId + #: New node count. + child_node_count: int + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ChildNodeCountUpdated: + return cls( + node_id=NodeId.from_json(json["nodeId"]), + child_node_count=int(json["childNodeCount"]), + )
+ + + +
+[docs] +@event_class("DOM.childNodeInserted") +@dataclass +class ChildNodeInserted: + """ + Mirrors ``DOMNodeInserted`` event. + """ + + #: Id of the node that has changed. + parent_node_id: NodeId + #: Id of the previous sibling. + previous_node_id: NodeId + #: Inserted node data. + node: Node + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ChildNodeInserted: + return cls( + parent_node_id=NodeId.from_json(json["parentNodeId"]), + previous_node_id=NodeId.from_json(json["previousNodeId"]), + node=Node.from_json(json["node"]), + )
+ + + +
+[docs] +@event_class("DOM.childNodeRemoved") +@dataclass +class ChildNodeRemoved: + """ + Mirrors ``DOMNodeRemoved`` event. + """ + + #: Parent id. + parent_node_id: NodeId + #: Id of the node that has been removed. + node_id: NodeId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ChildNodeRemoved: + return cls( + parent_node_id=NodeId.from_json(json["parentNodeId"]), + node_id=NodeId.from_json(json["nodeId"]), + )
+ + + +
+[docs] +@event_class("DOM.distributedNodesUpdated") +@dataclass +class DistributedNodesUpdated: + """ + **EXPERIMENTAL** + + Called when distribution is changed. + """ + + #: Insertion point where distributed nodes were updated. + insertion_point_id: NodeId + #: Distributed nodes for given insertion point. + distributed_nodes: typing.List[BackendNode] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DistributedNodesUpdated: + return cls( + insertion_point_id=NodeId.from_json(json["insertionPointId"]), + distributed_nodes=[ + BackendNode.from_json(i) for i in json["distributedNodes"] + ], + )
+ + + +
+[docs] +@event_class("DOM.documentUpdated") +@dataclass +class DocumentUpdated: + """ + Fired when ``Document`` has been totally updated. Node ids are no longer valid. + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DocumentUpdated: + return cls()
+ + + +
+[docs] +@event_class("DOM.inlineStyleInvalidated") +@dataclass +class InlineStyleInvalidated: + """ + **EXPERIMENTAL** + + Fired when ``Element``'s inline style is modified via a CSS property modification. + """ + + #: Ids of the nodes for which the inline styles have been invalidated. + node_ids: typing.List[NodeId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InlineStyleInvalidated: + return cls(node_ids=[NodeId.from_json(i) for i in json["nodeIds"]])
+ + + +
+[docs] +@event_class("DOM.pseudoElementAdded") +@dataclass +class PseudoElementAdded: + """ + **EXPERIMENTAL** + + Called when a pseudo element is added to an element. + """ + + #: Pseudo element's parent element id. + parent_id: NodeId + #: The added pseudo element. + pseudo_element: Node + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PseudoElementAdded: + return cls( + parent_id=NodeId.from_json(json["parentId"]), + pseudo_element=Node.from_json(json["pseudoElement"]), + )
+ + + +
+[docs] +@event_class("DOM.topLayerElementsUpdated") +@dataclass +class TopLayerElementsUpdated: + """ + **EXPERIMENTAL** + + Called when top layer elements are changed. + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TopLayerElementsUpdated: + return cls()
+ + + +
+[docs] +@event_class("DOM.pseudoElementRemoved") +@dataclass +class PseudoElementRemoved: + """ + **EXPERIMENTAL** + + Called when a pseudo element is removed from an element. + """ + + #: Pseudo element's parent element id. + parent_id: NodeId + #: The removed pseudo element id. + pseudo_element_id: NodeId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PseudoElementRemoved: + return cls( + parent_id=NodeId.from_json(json["parentId"]), + pseudo_element_id=NodeId.from_json(json["pseudoElementId"]), + )
+ + + +
+[docs] +@event_class("DOM.setChildNodes") +@dataclass +class SetChildNodes: + """ + Fired when backend wants to provide client with the missing DOM structure. This happens upon + most of the calls requesting node ids. + """ + + #: Parent node id to populate with children. + parent_id: NodeId + #: Child nodes array. + nodes: typing.List[Node] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SetChildNodes: + return cls( + parent_id=NodeId.from_json(json["parentId"]), + nodes=[Node.from_json(i) for i in json["nodes"]], + )
+ + + +
+[docs] +@event_class("DOM.shadowRootPopped") +@dataclass +class ShadowRootPopped: + """ + **EXPERIMENTAL** + + Called when shadow root is popped from the element. + """ + + #: Host element id. + host_id: NodeId + #: Shadow root id. + root_id: NodeId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ShadowRootPopped: + return cls( + host_id=NodeId.from_json(json["hostId"]), + root_id=NodeId.from_json(json["rootId"]), + )
+ + + +
+[docs] +@event_class("DOM.shadowRootPushed") +@dataclass +class ShadowRootPushed: + """ + **EXPERIMENTAL** + + Called when shadow root is pushed into the element. + """ + + #: Host element id. + host_id: NodeId + #: Shadow root. + root: Node + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ShadowRootPushed: + return cls( + host_id=NodeId.from_json(json["hostId"]), root=Node.from_json(json["root"]) + )
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/dom_debugger.html b/docs/_build/html/_modules/nodriver/cdp/dom_debugger.html
new file mode 100644
index 0000000..ab9e57e
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/dom_debugger.html
@@ -0,0 +1,669 @@
+nodriver.cdp.dom_debugger - nodriver documentation

Source code for nodriver.cdp.dom_debugger

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: DOMDebugger
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import runtime
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +class DOMBreakpointType(enum.Enum): + """ + DOM breakpoint type. + """ + + SUBTREE_MODIFIED = "subtree-modified" + ATTRIBUTE_MODIFIED = "attribute-modified" + NODE_REMOVED = "node-removed" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> DOMBreakpointType: + return cls(json)
+ + + +
+[docs] +class CSPViolationType(enum.Enum): + """ + CSP Violation type. + """ + + TRUSTEDTYPE_SINK_VIOLATION = "trustedtype-sink-violation" + TRUSTEDTYPE_POLICY_VIOLATION = "trustedtype-policy-violation" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CSPViolationType: + return cls(json)
+ + + +
+[docs] +@dataclass +class EventListener: + """ + Object event listener. + """ + + #: ``EventListener``'s type. + type_: str + + #: ``EventListener``'s useCapture. + use_capture: bool + + #: ``EventListener``'s passive flag. + passive: bool + + #: ``EventListener``'s once flag. + once: bool + + #: Script id of the handler code. + script_id: runtime.ScriptId + + #: Line number in the script (0-based). + line_number: int + + #: Column number in the script (0-based). + column_number: int + + #: Event handler function value. + handler: typing.Optional[runtime.RemoteObject] = None + + #: Event original handler function value. + original_handler: typing.Optional[runtime.RemoteObject] = None + + #: Node the listener is added to (if any). + backend_node_id: typing.Optional[dom.BackendNodeId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + json["useCapture"] = self.use_capture + json["passive"] = self.passive + json["once"] = self.once + json["scriptId"] = self.script_id.to_json() + json["lineNumber"] = self.line_number + json["columnNumber"] = self.column_number + if self.handler is not None: + json["handler"] = self.handler.to_json() + if self.original_handler is not None: + json["originalHandler"] = self.original_handler.to_json() + if self.backend_node_id is not None: + json["backendNodeId"] = self.backend_node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> EventListener: + return cls( + type_=str(json["type"]), + use_capture=bool(json["useCapture"]), + passive=bool(json["passive"]), + once=bool(json["once"]), + script_id=runtime.ScriptId.from_json(json["scriptId"]), + line_number=int(json["lineNumber"]), + column_number=int(json["columnNumber"]), + handler=( + runtime.RemoteObject.from_json(json["handler"]) + if json.get("handler", None) is not None + else None + ), + original_handler=( + runtime.RemoteObject.from_json(json["originalHandler"]) + if json.get("originalHandler", None) is not None + else None + ), + backend_node_id=( + dom.BackendNodeId.from_json(json["backendNodeId"]) + if json.get("backendNodeId", None) is not None + else None + ), + )
+ + + +
+[docs] +def get_event_listeners( + object_id: runtime.RemoteObjectId, + depth: typing.Optional[int] = None, + pierce: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[EventListener]]: + """ + Returns event listeners of the given object. + + :param object_id: Identifier of the object to return listeners for. + :param depth: *(Optional)* The maximum depth at which Node children should be retrieved, defaults to 1. Use -1 for the entire subtree or provide an integer larger than 0. + :param pierce: *(Optional)* Whether or not iframes and shadow roots should be traversed when returning the subtree (default is false). Reports listeners for all contexts if pierce is enabled. + :returns: Array of relevant listeners. + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + if depth is not None: + params["depth"] = depth + if pierce is not None: + params["pierce"] = pierce + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.getEventListeners", + "params": params, + } + json = yield cmd_dict + return [EventListener.from_json(i) for i in json["listeners"]]
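Listener inspection takes a JavaScript object id, so a DOM ``NodeId`` has to be resolved first. A sketch chaining the DOM and DOMDebugger domains (``tab.send()`` assumed to drive these generator commands, as nodriver's Tab/Connection does):

import typing

from nodriver import cdp


async def listeners_for(tab, selector: str) -> typing.List[cdp.dom_debugger.EventListener]:
    root = await tab.send(cdp.dom.get_document())
    node_id = await tab.send(cdp.dom.query_selector(root.node_id, selector))
    # getEventListeners wants a JavaScript object id, not a DOM NodeId.
    remote_object = await tab.send(cdp.dom.resolve_node(node_id=node_id))
    return await tab.send(
        cdp.dom_debugger.get_event_listeners(remote_object.object_id, depth=1)
    )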
+ + + +
+[docs] +def remove_dom_breakpoint( + node_id: dom.NodeId, type_: DOMBreakpointType +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes DOM breakpoint that was set using ``setDOMBreakpoint``. + + :param node_id: Identifier of the node to remove breakpoint from. + :param type_: Type of the breakpoint to remove. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["type"] = type_.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.removeDOMBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def remove_event_listener_breakpoint( + event_name: str, target_name: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes breakpoint on particular DOM event. + + :param event_name: Event name. + :param target_name: **(EXPERIMENTAL)** *(Optional)* EventTarget interface name. + """ + params: T_JSON_DICT = dict() + params["eventName"] = event_name + if target_name is not None: + params["targetName"] = target_name + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.removeEventListenerBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def remove_instrumentation_breakpoint( + event_name: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes breakpoint on particular native event. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param event_name: Instrumentation name to stop on. + """ + params: T_JSON_DICT = dict() + params["eventName"] = event_name + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.removeInstrumentationBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def remove_xhr_breakpoint(url: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes breakpoint from XMLHttpRequest. + + :param url: Resource URL substring. + """ + params: T_JSON_DICT = dict() + params["url"] = url + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.removeXHRBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_break_on_csp_violation( + violation_types: typing.List[CSPViolationType], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets breakpoint on particular CSP violations. + + **EXPERIMENTAL** + + :param violation_types: CSP Violations to stop upon. + """ + params: T_JSON_DICT = dict() + params["violationTypes"] = [i.to_json() for i in violation_types] + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.setBreakOnCSPViolation", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_dom_breakpoint( + node_id: dom.NodeId, type_: DOMBreakpointType +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets breakpoint on particular operation with DOM. + + :param node_id: Identifier of the node to set breakpoint on. + :param type_: Type of the operation to stop upon. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + params["type"] = type_.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.setDOMBreakpoint", + "params": params, + } + json = yield cmd_dict
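Setting a DOM breakpoint has the same effect as DevTools' "Break on subtree modifications": script execution pauses when the watched node changes. A sketch (``tab.send()`` assumed; the Debugger domain must be enabled elsewhere for the pause to surface):

from nodriver import cdp


async def break_on_subtree_changes(tab, selector: str) -> None:
    root = await tab.send(cdp.dom.get_document())
    node_id = await tab.send(cdp.dom.query_selector(root.node_id, selector))
    await tab.send(
        cdp.dom_debugger.set_dom_breakpoint(
            node_id, cdp.dom_debugger.DOMBreakpointType.SUBTREE_MODIFIED
        )
    )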
+ + + +
+[docs] +def set_event_listener_breakpoint( + event_name: str, target_name: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets breakpoint on particular DOM event. + + :param event_name: DOM Event name to stop on (any DOM event will do). + :param target_name: **(EXPERIMENTAL)** *(Optional)* EventTarget interface name to stop on. If equal to ```"*"``` or not provided, will stop on any EventTarget. + """ + params: T_JSON_DICT = dict() + params["eventName"] = event_name + if target_name is not None: + params["targetName"] = target_name + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.setEventListenerBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_instrumentation_breakpoint( + event_name: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets breakpoint on particular native event. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param event_name: Instrumentation name to stop on. + """ + params: T_JSON_DICT = dict() + params["eventName"] = event_name + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.setInstrumentationBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_xhr_breakpoint(url: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets breakpoint on XMLHttpRequest. + + :param url: Resource URL substring. All XHRs having this substring in the URL will get stopped upon. + """ + params: T_JSON_DICT = dict() + params["url"] = url + cmd_dict: T_JSON_DICT = { + "method": "DOMDebugger.setXHRBreakpoint", + "params": params, + } + json = yield cmd_dict
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/dom_snapshot.html b/docs/_build/html/_modules/nodriver/cdp/dom_snapshot.html
new file mode 100644
index 0000000..8974e51
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/dom_snapshot.html
@@ -0,0 +1,1484 @@
+nodriver.cdp.dom_snapshot - nodriver documentation

Source code for nodriver.cdp.dom_snapshot

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: DOMSnapshot (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import dom_debugger
+from . import page
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +@dataclass +class DOMNode: + """ + A Node in the DOM tree. + """ + + #: ``Node``'s nodeType. + node_type: int + + #: ``Node``'s nodeName. + node_name: str + + #: ``Node``'s nodeValue. + node_value: str + + #: ``Node``'s id, corresponds to DOM.Node.backendNodeId. + backend_node_id: dom.BackendNodeId + + #: Only set for textarea elements, contains the text value. + text_value: typing.Optional[str] = None + + #: Only set for input elements, contains the input's associated text value. + input_value: typing.Optional[str] = None + + #: Only set for radio and checkbox input elements, indicates if the element has been checked + input_checked: typing.Optional[bool] = None + + #: Only set for option elements, indicates if the element has been selected + option_selected: typing.Optional[bool] = None + + #: The indexes of the node's child nodes in the ``domNodes`` array returned by ``getSnapshot``, if + #: any. + child_node_indexes: typing.Optional[typing.List[int]] = None + + #: Attributes of an ``Element`` node. + attributes: typing.Optional[typing.List[NameValue]] = None + + #: Indexes of pseudo elements associated with this node in the ``domNodes`` array returned by + #: ``getSnapshot``, if any. + pseudo_element_indexes: typing.Optional[typing.List[int]] = None + + #: The index of the node's related layout tree node in the ``layoutTreeNodes`` array returned by + #: ``getSnapshot``, if any. + layout_node_index: typing.Optional[int] = None + + #: Document URL that ``Document`` or ``FrameOwner`` node points to. + document_url: typing.Optional[str] = None + + #: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion. + base_url: typing.Optional[str] = None + + #: Only set for documents, contains the document's content language. + content_language: typing.Optional[str] = None + + #: Only set for documents, contains the document's character set encoding. + document_encoding: typing.Optional[str] = None + + #: ``DocumentType`` node's publicId. + public_id: typing.Optional[str] = None + + #: ``DocumentType`` node's systemId. + system_id: typing.Optional[str] = None + + #: Frame ID for frame owner elements and also for the document node. + frame_id: typing.Optional[page.FrameId] = None + + #: The index of a frame owner element's content document in the ``domNodes`` array returned by + #: ``getSnapshot``, if any. + content_document_index: typing.Optional[int] = None + + #: Type of a pseudo element node. + pseudo_type: typing.Optional[dom.PseudoType] = None + + #: Shadow root type. + shadow_root_type: typing.Optional[dom.ShadowRootType] = None + + #: Whether this DOM node responds to mouse clicks. This includes nodes that have had click + #: event listeners attached via JavaScript as well as anchor tags that naturally navigate when + #: clicked. + is_clickable: typing.Optional[bool] = None + + #: Details of the node's event listeners, if any. + event_listeners: typing.Optional[typing.List[dom_debugger.EventListener]] = None + + #: The selected url for nodes with a srcset attribute. + current_source_url: typing.Optional[str] = None + + #: The url of the script (if any) that generates this node. + origin_url: typing.Optional[str] = None + + #: Scroll offsets, set when this node is a Document. 
+ scroll_offset_x: typing.Optional[float] = None + + scroll_offset_y: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["nodeType"] = self.node_type + json["nodeName"] = self.node_name + json["nodeValue"] = self.node_value + json["backendNodeId"] = self.backend_node_id.to_json() + if self.text_value is not None: + json["textValue"] = self.text_value + if self.input_value is not None: + json["inputValue"] = self.input_value + if self.input_checked is not None: + json["inputChecked"] = self.input_checked + if self.option_selected is not None: + json["optionSelected"] = self.option_selected + if self.child_node_indexes is not None: + json["childNodeIndexes"] = [i for i in self.child_node_indexes] + if self.attributes is not None: + json["attributes"] = [i.to_json() for i in self.attributes] + if self.pseudo_element_indexes is not None: + json["pseudoElementIndexes"] = [i for i in self.pseudo_element_indexes] + if self.layout_node_index is not None: + json["layoutNodeIndex"] = self.layout_node_index + if self.document_url is not None: + json["documentURL"] = self.document_url + if self.base_url is not None: + json["baseURL"] = self.base_url + if self.content_language is not None: + json["contentLanguage"] = self.content_language + if self.document_encoding is not None: + json["documentEncoding"] = self.document_encoding + if self.public_id is not None: + json["publicId"] = self.public_id + if self.system_id is not None: + json["systemId"] = self.system_id + if self.frame_id is not None: + json["frameId"] = self.frame_id.to_json() + if self.content_document_index is not None: + json["contentDocumentIndex"] = self.content_document_index + if self.pseudo_type is not None: + json["pseudoType"] = self.pseudo_type.to_json() + if self.shadow_root_type is not None: + json["shadowRootType"] = self.shadow_root_type.to_json() + if self.is_clickable is not None: + json["isClickable"] = self.is_clickable + if self.event_listeners is not None: + json["eventListeners"] = [i.to_json() for i in self.event_listeners] + if self.current_source_url is not None: + json["currentSourceURL"] = self.current_source_url + if self.origin_url is not None: + json["originURL"] = self.origin_url + if self.scroll_offset_x is not None: + json["scrollOffsetX"] = self.scroll_offset_x + if self.scroll_offset_y is not None: + json["scrollOffsetY"] = self.scroll_offset_y + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DOMNode: + return cls( + node_type=int(json["nodeType"]), + node_name=str(json["nodeName"]), + node_value=str(json["nodeValue"]), + backend_node_id=dom.BackendNodeId.from_json(json["backendNodeId"]), + text_value=( + str(json["textValue"]) + if json.get("textValue", None) is not None + else None + ), + input_value=( + str(json["inputValue"]) + if json.get("inputValue", None) is not None + else None + ), + input_checked=( + bool(json["inputChecked"]) + if json.get("inputChecked", None) is not None + else None + ), + option_selected=( + bool(json["optionSelected"]) + if json.get("optionSelected", None) is not None + else None + ), + child_node_indexes=( + [int(i) for i in json["childNodeIndexes"]] + if json.get("childNodeIndexes", None) is not None + else None + ), + attributes=( + [NameValue.from_json(i) for i in json["attributes"]] + if json.get("attributes", None) is not None + else None + ), + pseudo_element_indexes=( + [int(i) for i in json["pseudoElementIndexes"]] + if json.get("pseudoElementIndexes", None) is not None + else None + ), 
+ layout_node_index=( + int(json["layoutNodeIndex"]) + if json.get("layoutNodeIndex", None) is not None + else None + ), + document_url=( + str(json["documentURL"]) + if json.get("documentURL", None) is not None + else None + ), + base_url=( + str(json["baseURL"]) if json.get("baseURL", None) is not None else None + ), + content_language=( + str(json["contentLanguage"]) + if json.get("contentLanguage", None) is not None + else None + ), + document_encoding=( + str(json["documentEncoding"]) + if json.get("documentEncoding", None) is not None + else None + ), + public_id=( + str(json["publicId"]) + if json.get("publicId", None) is not None + else None + ), + system_id=( + str(json["systemId"]) + if json.get("systemId", None) is not None + else None + ), + frame_id=( + page.FrameId.from_json(json["frameId"]) + if json.get("frameId", None) is not None + else None + ), + content_document_index=( + int(json["contentDocumentIndex"]) + if json.get("contentDocumentIndex", None) is not None + else None + ), + pseudo_type=( + dom.PseudoType.from_json(json["pseudoType"]) + if json.get("pseudoType", None) is not None + else None + ), + shadow_root_type=( + dom.ShadowRootType.from_json(json["shadowRootType"]) + if json.get("shadowRootType", None) is not None + else None + ), + is_clickable=( + bool(json["isClickable"]) + if json.get("isClickable", None) is not None + else None + ), + event_listeners=( + [ + dom_debugger.EventListener.from_json(i) + for i in json["eventListeners"] + ] + if json.get("eventListeners", None) is not None + else None + ), + current_source_url=( + str(json["currentSourceURL"]) + if json.get("currentSourceURL", None) is not None + else None + ), + origin_url=( + str(json["originURL"]) + if json.get("originURL", None) is not None + else None + ), + scroll_offset_x=( + float(json["scrollOffsetX"]) + if json.get("scrollOffsetX", None) is not None + else None + ), + scroll_offset_y=( + float(json["scrollOffsetY"]) + if json.get("scrollOffsetY", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class InlineTextBox: + """ + Details of post layout rendered text positions. The exact layout should not be regarded as + stable and may change between versions. + """ + + #: The bounding box in document coordinates. Note that scroll offset of the document is ignored. + bounding_box: dom.Rect + + #: The starting index in characters, for this post layout textbox substring. Characters that + #: would be represented as a surrogate pair in UTF-16 have length 2. + start_character_index: int + + #: The number of characters in this post layout textbox substring. Characters that would be + #: represented as a surrogate pair in UTF-16 have length 2. + num_characters: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["boundingBox"] = self.bounding_box.to_json() + json["startCharacterIndex"] = self.start_character_index + json["numCharacters"] = self.num_characters + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InlineTextBox: + return cls( + bounding_box=dom.Rect.from_json(json["boundingBox"]), + start_character_index=int(json["startCharacterIndex"]), + num_characters=int(json["numCharacters"]), + )
+ + + +
+[docs] +@dataclass +class LayoutTreeNode: + """ + Details of an element in the DOM tree with a LayoutObject. + """ + + #: The index of the related DOM node in the ``domNodes`` array returned by ``getSnapshot``. + dom_node_index: int + + #: The bounding box in document coordinates. Note that scroll offset of the document is ignored. + bounding_box: dom.Rect + + #: Contents of the LayoutText, if any. + layout_text: typing.Optional[str] = None + + #: The post-layout inline text nodes, if any. + inline_text_nodes: typing.Optional[typing.List[InlineTextBox]] = None + + #: Index into the ``computedStyles`` array returned by ``getSnapshot``. + style_index: typing.Optional[int] = None + + #: Global paint order index, which is determined by the stacking order of the nodes. Nodes + #: that are painted together will have the same index. Only provided if includePaintOrder in + #: getSnapshot was true. + paint_order: typing.Optional[int] = None + + #: Set to true to indicate the element begins a new stacking context. + is_stacking_context: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["domNodeIndex"] = self.dom_node_index + json["boundingBox"] = self.bounding_box.to_json() + if self.layout_text is not None: + json["layoutText"] = self.layout_text + if self.inline_text_nodes is not None: + json["inlineTextNodes"] = [i.to_json() for i in self.inline_text_nodes] + if self.style_index is not None: + json["styleIndex"] = self.style_index + if self.paint_order is not None: + json["paintOrder"] = self.paint_order + if self.is_stacking_context is not None: + json["isStackingContext"] = self.is_stacking_context + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LayoutTreeNode: + return cls( + dom_node_index=int(json["domNodeIndex"]), + bounding_box=dom.Rect.from_json(json["boundingBox"]), + layout_text=( + str(json["layoutText"]) + if json.get("layoutText", None) is not None + else None + ), + inline_text_nodes=( + [InlineTextBox.from_json(i) for i in json["inlineTextNodes"]] + if json.get("inlineTextNodes", None) is not None + else None + ), + style_index=( + int(json["styleIndex"]) + if json.get("styleIndex", None) is not None + else None + ), + paint_order=( + int(json["paintOrder"]) + if json.get("paintOrder", None) is not None + else None + ), + is_stacking_context=( + bool(json["isStackingContext"]) + if json.get("isStackingContext", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ComputedStyle: + """ + A subset of the full ComputedStyle as defined by the request whitelist. + """ + + #: Name/value pairs of computed style properties. + properties: typing.List[NameValue] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["properties"] = [i.to_json() for i in self.properties] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ComputedStyle: + return cls( + properties=[NameValue.from_json(i) for i in json["properties"]], + )
+ + + +
+[docs] +@dataclass +class NameValue: + """ + A name/value pair. + """ + + #: Attribute/property name. + name: str + + #: Attribute/property value. + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> NameValue: + return cls( + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +class StringIndex(int): + """ + Index of the string in the strings table. + """ + + def to_json(self) -> int: + return self + + @classmethod + def from_json(cls, json: int) -> StringIndex: + return cls(json) + + def __repr__(self): + return "StringIndex({})".format(super().__repr__())
+ + + +
+[docs] +class ArrayOfStrings(list): + """ + Array of string indexes pointing into the strings table. + """ + + def to_json(self) -> typing.List[StringIndex]: + return self + + @classmethod + def from_json(cls, json: typing.List[StringIndex]) -> ArrayOfStrings: + return cls(json) + + def __repr__(self): + return "ArrayOfStrings({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class RareStringData: + """ + Data that is only present on rare nodes. + """ + + index: typing.List[int] + + value: typing.List[StringIndex] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["index"] = [i for i in self.index] + json["value"] = [i.to_json() for i in self.value] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RareStringData: + return cls( + index=[int(i) for i in json["index"]], + value=[StringIndex.from_json(i) for i in json["value"]], + )
+ + + +
+[docs] +@dataclass +class RareBooleanData: + index: typing.List[int] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["index"] = [i for i in self.index] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RareBooleanData: + return cls( + index=[int(i) for i in json["index"]], + )
+ + + +
+[docs] +@dataclass +class RareIntegerData: + index: typing.List[int] + + value: typing.List[int] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["index"] = [i for i in self.index] + json["value"] = [i for i in self.value] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RareIntegerData: + return cls( + index=[int(i) for i in json["index"]], + value=[int(i) for i in json["value"]], + )
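The three Rare* containers above are a sparse encoding: ``index`` lists the node rows (positions in the ``NodeTreeSnapshot`` arrays) that actually carry data, and ``value``, where present, is aligned with ``index``; ``RareBooleanData`` stores only the rows whose flag is true. A minimal sketch of lookup helpers under that reading; the helper names are made up here, the ``strings`` list is the shared table returned by ``capture_snapshot``, and the usual ``from nodriver import cdp`` import is assumed:

import typing

from nodriver import cdp


def rare_string(data: cdp.dom_snapshot.RareStringData, row: int,
                strings: typing.List[str]) -> typing.Optional[str]:
    """Resolve the string recorded for node row ``row``, or None if absent."""
    try:
        pos = data.index.index(row)
    except ValueError:
        return None
    return strings[int(data.value[pos])]


def rare_bool(data: cdp.dom_snapshot.RareBooleanData, row: int) -> bool:
    """True when node row ``row`` appears in the sparse boolean set."""
    return row in data.index


def rare_int(data: cdp.dom_snapshot.RareIntegerData, row: int) -> typing.Optional[int]:
    """Integer recorded for node row ``row``, or None if absent."""
    try:
        return data.value[data.index.index(row)]
    except ValueError:
        return None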
+ + + +
+[docs] +class Rectangle(list): + def to_json(self) -> typing.List[float]: + return self + + @classmethod + def from_json(cls, json: typing.List[float]) -> Rectangle: + return cls(json) + + def __repr__(self): + return "Rectangle({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class DocumentSnapshot: + """ + Document snapshot. + """ + + #: Document URL that ``Document`` or ``FrameOwner`` node points to. + document_url: StringIndex + + #: Document title. + title: StringIndex + + #: Base URL that ``Document`` or ``FrameOwner`` node uses for URL completion. + base_url: StringIndex + + #: Contains the document's content language. + content_language: StringIndex + + #: Contains the document's character set encoding. + encoding_name: StringIndex + + #: ``DocumentType`` node's publicId. + public_id: StringIndex + + #: ``DocumentType`` node's systemId. + system_id: StringIndex + + #: Frame ID for frame owner elements and also for the document node. + frame_id: StringIndex + + #: A table with dom nodes. + nodes: NodeTreeSnapshot + + #: The nodes in the layout tree. + layout: LayoutTreeSnapshot + + #: The post-layout inline text nodes. + text_boxes: TextBoxSnapshot + + #: Horizontal scroll offset. + scroll_offset_x: typing.Optional[float] = None + + #: Vertical scroll offset. + scroll_offset_y: typing.Optional[float] = None + + #: Document content width. + content_width: typing.Optional[float] = None + + #: Document content height. + content_height: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["documentURL"] = self.document_url.to_json() + json["title"] = self.title.to_json() + json["baseURL"] = self.base_url.to_json() + json["contentLanguage"] = self.content_language.to_json() + json["encodingName"] = self.encoding_name.to_json() + json["publicId"] = self.public_id.to_json() + json["systemId"] = self.system_id.to_json() + json["frameId"] = self.frame_id.to_json() + json["nodes"] = self.nodes.to_json() + json["layout"] = self.layout.to_json() + json["textBoxes"] = self.text_boxes.to_json() + if self.scroll_offset_x is not None: + json["scrollOffsetX"] = self.scroll_offset_x + if self.scroll_offset_y is not None: + json["scrollOffsetY"] = self.scroll_offset_y + if self.content_width is not None: + json["contentWidth"] = self.content_width + if self.content_height is not None: + json["contentHeight"] = self.content_height + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DocumentSnapshot: + return cls( + document_url=StringIndex.from_json(json["documentURL"]), + title=StringIndex.from_json(json["title"]), + base_url=StringIndex.from_json(json["baseURL"]), + content_language=StringIndex.from_json(json["contentLanguage"]), + encoding_name=StringIndex.from_json(json["encodingName"]), + public_id=StringIndex.from_json(json["publicId"]), + system_id=StringIndex.from_json(json["systemId"]), + frame_id=StringIndex.from_json(json["frameId"]), + nodes=NodeTreeSnapshot.from_json(json["nodes"]), + layout=LayoutTreeSnapshot.from_json(json["layout"]), + text_boxes=TextBoxSnapshot.from_json(json["textBoxes"]), + scroll_offset_x=( + float(json["scrollOffsetX"]) + if json.get("scrollOffsetX", None) is not None + else None + ), + scroll_offset_y=( + float(json["scrollOffsetY"]) + if json.get("scrollOffsetY", None) is not None + else None + ), + content_width=( + float(json["contentWidth"]) + if json.get("contentWidth", None) is not None + else None + ), + content_height=( + float(json["contentHeight"]) + if json.get("contentHeight", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class NodeTreeSnapshot: + """ + Table containing nodes. + """ + + #: Parent node index. + parent_index: typing.Optional[typing.List[int]] = None + + #: ``Node``'s nodeType. + node_type: typing.Optional[typing.List[int]] = None + + #: Type of the shadow root the ``Node`` is in. String values are equal to the ``ShadowRootType`` enum. + shadow_root_type: typing.Optional[RareStringData] = None + + #: ``Node``'s nodeName. + node_name: typing.Optional[typing.List[StringIndex]] = None + + #: ``Node``'s nodeValue. + node_value: typing.Optional[typing.List[StringIndex]] = None + + #: ``Node``'s id, corresponds to DOM.Node.backendNodeId. + backend_node_id: typing.Optional[typing.List[dom.BackendNodeId]] = None + + #: Attributes of an ``Element`` node. Flatten name, value pairs. + attributes: typing.Optional[typing.List[ArrayOfStrings]] = None + + #: Only set for textarea elements, contains the text value. + text_value: typing.Optional[RareStringData] = None + + #: Only set for input elements, contains the input's associated text value. + input_value: typing.Optional[RareStringData] = None + + #: Only set for radio and checkbox input elements, indicates if the element has been checked + input_checked: typing.Optional[RareBooleanData] = None + + #: Only set for option elements, indicates if the element has been selected + option_selected: typing.Optional[RareBooleanData] = None + + #: The index of the document in the list of the snapshot documents. + content_document_index: typing.Optional[RareIntegerData] = None + + #: Type of a pseudo element node. + pseudo_type: typing.Optional[RareStringData] = None + + #: Pseudo element identifier for this node. Only present if there is a + #: valid pseudoType. + pseudo_identifier: typing.Optional[RareStringData] = None + + #: Whether this DOM node responds to mouse clicks. This includes nodes that have had click + #: event listeners attached via JavaScript as well as anchor tags that naturally navigate when + #: clicked. + is_clickable: typing.Optional[RareBooleanData] = None + + #: The selected url for nodes with a srcset attribute. + current_source_url: typing.Optional[RareStringData] = None + + #: The url of the script (if any) that generates this node. 
+ origin_url: typing.Optional[RareStringData] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.parent_index is not None: + json["parentIndex"] = [i for i in self.parent_index] + if self.node_type is not None: + json["nodeType"] = [i for i in self.node_type] + if self.shadow_root_type is not None: + json["shadowRootType"] = self.shadow_root_type.to_json() + if self.node_name is not None: + json["nodeName"] = [i.to_json() for i in self.node_name] + if self.node_value is not None: + json["nodeValue"] = [i.to_json() for i in self.node_value] + if self.backend_node_id is not None: + json["backendNodeId"] = [i.to_json() for i in self.backend_node_id] + if self.attributes is not None: + json["attributes"] = [i.to_json() for i in self.attributes] + if self.text_value is not None: + json["textValue"] = self.text_value.to_json() + if self.input_value is not None: + json["inputValue"] = self.input_value.to_json() + if self.input_checked is not None: + json["inputChecked"] = self.input_checked.to_json() + if self.option_selected is not None: + json["optionSelected"] = self.option_selected.to_json() + if self.content_document_index is not None: + json["contentDocumentIndex"] = self.content_document_index.to_json() + if self.pseudo_type is not None: + json["pseudoType"] = self.pseudo_type.to_json() + if self.pseudo_identifier is not None: + json["pseudoIdentifier"] = self.pseudo_identifier.to_json() + if self.is_clickable is not None: + json["isClickable"] = self.is_clickable.to_json() + if self.current_source_url is not None: + json["currentSourceURL"] = self.current_source_url.to_json() + if self.origin_url is not None: + json["originURL"] = self.origin_url.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> NodeTreeSnapshot: + return cls( + parent_index=( + [int(i) for i in json["parentIndex"]] + if json.get("parentIndex", None) is not None + else None + ), + node_type=( + [int(i) for i in json["nodeType"]] + if json.get("nodeType", None) is not None + else None + ), + shadow_root_type=( + RareStringData.from_json(json["shadowRootType"]) + if json.get("shadowRootType", None) is not None + else None + ), + node_name=( + [StringIndex.from_json(i) for i in json["nodeName"]] + if json.get("nodeName", None) is not None + else None + ), + node_value=( + [StringIndex.from_json(i) for i in json["nodeValue"]] + if json.get("nodeValue", None) is not None + else None + ), + backend_node_id=( + [dom.BackendNodeId.from_json(i) for i in json["backendNodeId"]] + if json.get("backendNodeId", None) is not None + else None + ), + attributes=( + [ArrayOfStrings.from_json(i) for i in json["attributes"]] + if json.get("attributes", None) is not None + else None + ), + text_value=( + RareStringData.from_json(json["textValue"]) + if json.get("textValue", None) is not None + else None + ), + input_value=( + RareStringData.from_json(json["inputValue"]) + if json.get("inputValue", None) is not None + else None + ), + input_checked=( + RareBooleanData.from_json(json["inputChecked"]) + if json.get("inputChecked", None) is not None + else None + ), + option_selected=( + RareBooleanData.from_json(json["optionSelected"]) + if json.get("optionSelected", None) is not None + else None + ), + content_document_index=( + RareIntegerData.from_json(json["contentDocumentIndex"]) + if json.get("contentDocumentIndex", None) is not None + else None + ), + pseudo_type=( + RareStringData.from_json(json["pseudoType"]) + if json.get("pseudoType", None) is not None + else None + ), 
+ pseudo_identifier=( + RareStringData.from_json(json["pseudoIdentifier"]) + if json.get("pseudoIdentifier", None) is not None + else None + ), + is_clickable=( + RareBooleanData.from_json(json["isClickable"]) + if json.get("isClickable", None) is not None + else None + ), + current_source_url=( + RareStringData.from_json(json["currentSourceURL"]) + if json.get("currentSourceURL", None) is not None + else None + ), + origin_url=( + RareStringData.from_json(json["originURL"]) + if json.get("originURL", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class LayoutTreeSnapshot: + """ + Table of details of an element in the DOM tree with a LayoutObject. + """ + + #: Index of the corresponding node in the ``NodeTreeSnapshot`` array returned by ``captureSnapshot``. + node_index: typing.List[int] + + #: Array of indexes specifying computed style strings, filtered according to the ``computedStyles`` parameter passed to ``captureSnapshot``. + styles: typing.List[ArrayOfStrings] + + #: The absolute position bounding box. + bounds: typing.List[Rectangle] + + #: Contents of the LayoutText, if any. + text: typing.List[StringIndex] + + #: Stacking context information. + stacking_contexts: RareBooleanData + + #: Global paint order index, which is determined by the stacking order of the nodes. Nodes + #: that are painted together will have the same index. Only provided if includePaintOrder in + #: captureSnapshot was true. + paint_orders: typing.Optional[typing.List[int]] = None + + #: The offset rect of nodes. Only available when includeDOMRects is set to true + offset_rects: typing.Optional[typing.List[Rectangle]] = None + + #: The scroll rect of nodes. Only available when includeDOMRects is set to true + scroll_rects: typing.Optional[typing.List[Rectangle]] = None + + #: The client rect of nodes. Only available when includeDOMRects is set to true + client_rects: typing.Optional[typing.List[Rectangle]] = None + + #: The list of background colors that are blended with colors of overlapping elements. + blended_background_colors: typing.Optional[typing.List[StringIndex]] = None + + #: The list of computed text opacities. + text_color_opacities: typing.Optional[typing.List[float]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["nodeIndex"] = [i for i in self.node_index] + json["styles"] = [i.to_json() for i in self.styles] + json["bounds"] = [i.to_json() for i in self.bounds] + json["text"] = [i.to_json() for i in self.text] + json["stackingContexts"] = self.stacking_contexts.to_json() + if self.paint_orders is not None: + json["paintOrders"] = [i for i in self.paint_orders] + if self.offset_rects is not None: + json["offsetRects"] = [i.to_json() for i in self.offset_rects] + if self.scroll_rects is not None: + json["scrollRects"] = [i.to_json() for i in self.scroll_rects] + if self.client_rects is not None: + json["clientRects"] = [i.to_json() for i in self.client_rects] + if self.blended_background_colors is not None: + json["blendedBackgroundColors"] = [ + i.to_json() for i in self.blended_background_colors + ] + if self.text_color_opacities is not None: + json["textColorOpacities"] = [i for i in self.text_color_opacities] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LayoutTreeSnapshot: + return cls( + node_index=[int(i) for i in json["nodeIndex"]], + styles=[ArrayOfStrings.from_json(i) for i in json["styles"]], + bounds=[Rectangle.from_json(i) for i in json["bounds"]], + text=[StringIndex.from_json(i) for i in json["text"]], + stacking_contexts=RareBooleanData.from_json(json["stackingContexts"]), + paint_orders=( + [int(i) for i in json["paintOrders"]] + if json.get("paintOrders", None) is not None + else None + ), + offset_rects=( + [Rectangle.from_json(i) for i in json["offsetRects"]] + if json.get("offsetRects", None) is not None + else None + ), + scroll_rects=( + [Rectangle.from_json(i) for i in json["scrollRects"]] + if json.get("scrollRects", None) is not None + else None + ), + client_rects=( + [Rectangle.from_json(i) for i in json["clientRects"]] + if 
json.get("clientRects", None) is not None + else None + ), + blended_background_colors=( + [StringIndex.from_json(i) for i in json["blendedBackgroundColors"]] + if json.get("blendedBackgroundColors", None) is not None + else None + ), + text_color_opacities=( + [float(i) for i in json["textColorOpacities"]] + if json.get("textColorOpacities", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class TextBoxSnapshot: + """ + Table of details of the post layout rendered text positions. The exact layout should not be regarded as + stable and may change between versions. + """ + + #: Index of the layout tree node that owns this box collection. + layout_index: typing.List[int] + + #: The absolute position bounding box. + bounds: typing.List[Rectangle] + + #: The starting index in characters, for this post layout textbox substring. Characters that + #: would be represented as a surrogate pair in UTF-16 have length 2. + start: typing.List[int] + + #: The number of characters in this post layout textbox substring. Characters that would be + #: represented as a surrogate pair in UTF-16 have length 2. + length: typing.List[int] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["layoutIndex"] = [i for i in self.layout_index] + json["bounds"] = [i.to_json() for i in self.bounds] + json["start"] = [i for i in self.start] + json["length"] = [i for i in self.length] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TextBoxSnapshot: + return cls( + layout_index=[int(i) for i in json["layoutIndex"]], + bounds=[Rectangle.from_json(i) for i in json["bounds"]], + start=[int(i) for i in json["start"]], + length=[int(i) for i in json["length"]], + )
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables DOM snapshot agent for the given page. + """ + cmd_dict: T_JSON_DICT = { + "method": "DOMSnapshot.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables DOM snapshot agent for the given page. + """ + cmd_dict: T_JSON_DICT = { + "method": "DOMSnapshot.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def get_snapshot( + computed_style_whitelist: typing.List[str], + include_event_listeners: typing.Optional[bool] = None, + include_paint_order: typing.Optional[bool] = None, + include_user_agent_shadow_tree: typing.Optional[bool] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + typing.List[DOMNode], typing.List[LayoutTreeNode], typing.List[ComputedStyle] + ], +]: + """ + Returns a document snapshot, including the full DOM tree of the root node (including iframes, + template contents, and imported documents) in a flattened array, as well as layout and + white-listed computed style information for the nodes. Shadow DOM in the returned DOM tree is + flattened. + + .. deprecated:: 1.3 + + :param computed_style_whitelist: Whitelist of computed styles to return. + :param include_event_listeners: *(Optional)* Whether or not to retrieve details of DOM listeners (default false). + :param include_paint_order: *(Optional)* Whether to determine and include the paint order index of LayoutTreeNodes (default false). + :param include_user_agent_shadow_tree: *(Optional)* Whether to include UA shadow tree in the snapshot (default false). + :returns: A tuple with the following items: + + 0. **domNodes** - The nodes in the DOM tree. The DOMNode at index 0 corresponds to the root document. + 1. **layoutTreeNodes** - The nodes in the layout tree. + 2. **computedStyles** - Whitelisted ComputedStyle properties for each node in the layout tree. + """ + params: T_JSON_DICT = dict() + params["computedStyleWhitelist"] = [i for i in computed_style_whitelist] + if include_event_listeners is not None: + params["includeEventListeners"] = include_event_listeners + if include_paint_order is not None: + params["includePaintOrder"] = include_paint_order + if include_user_agent_shadow_tree is not None: + params["includeUserAgentShadowTree"] = include_user_agent_shadow_tree + cmd_dict: T_JSON_DICT = { + "method": "DOMSnapshot.getSnapshot", + "params": params, + } + json = yield cmd_dict + return ( + [DOMNode.from_json(i) for i in json["domNodes"]], + [LayoutTreeNode.from_json(i) for i in json["layoutTreeNodes"]], + [ComputedStyle.from_json(i) for i in json["computedStyles"]], + )
+ + + +
+[docs] +def capture_snapshot( + computed_styles: typing.List[str], + include_paint_order: typing.Optional[bool] = None, + include_dom_rects: typing.Optional[bool] = None, + include_blended_background_colors: typing.Optional[bool] = None, + include_text_color_opacities: typing.Optional[bool] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[typing.List[DocumentSnapshot], typing.List[str]], +]: + """ + Returns a document snapshot, including the full DOM tree of the root node (including iframes, + template contents, and imported documents) in a flattened array, as well as layout and + white-listed computed style information for the nodes. Shadow DOM in the returned DOM tree is + flattened. + + :param computed_styles: Whitelist of computed styles to return. + :param include_paint_order: *(Optional)* Whether to include layout object paint orders into the snapshot. + :param include_dom_rects: *(Optional)* Whether to include DOM rectangles (offsetRects, clientRects, scrollRects) into the snapshot + :param include_blended_background_colors: **(EXPERIMENTAL)** *(Optional)* Whether to include blended background colors in the snapshot (default: false). Blended background color is achieved by blending background colors of all elements that overlap with the current element. + :param include_text_color_opacities: **(EXPERIMENTAL)** *(Optional)* Whether to include text color opacity in the snapshot (default: false). An element might have the opacity property set that affects the text color of the element. The final text color opacity is computed based on the opacity of all overlapping elements. + :returns: A tuple with the following items: + + 0. **documents** - The nodes in the DOM tree. The DOMNode at index 0 corresponds to the root document. + 1. **strings** - Shared string table that all string properties refer to with indexes. + """ + params: T_JSON_DICT = dict() + params["computedStyles"] = [i for i in computed_styles] + if include_paint_order is not None: + params["includePaintOrder"] = include_paint_order + if include_dom_rects is not None: + params["includeDOMRects"] = include_dom_rects + if include_blended_background_colors is not None: + params["includeBlendedBackgroundColors"] = include_blended_background_colors + if include_text_color_opacities is not None: + params["includeTextColorOpacities"] = include_text_color_opacities + cmd_dict: T_JSON_DICT = { + "method": "DOMSnapshot.captureSnapshot", + "params": params, + } + json = yield cmd_dict + return ( + [DocumentSnapshot.from_json(i) for i in json["documents"]], + [str(i) for i in json["strings"]], + )
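Every command in this module is a generator that yields a CDP request dict and receives the response; in nodriver they are normally driven by a tab/connection. A usage sketch, assuming nodriver's usual ``uc.start()`` / ``tab.send(...)`` plumbing (names outside this module are assumptions, not part of the generated API above):

import nodriver as uc
from nodriver import cdp


async def snapshot_example():
    browser = await uc.start()
    tab = await browser.get("https://example.com")

    # tab.send(...) is assumed to drive the generator-style CDP command
    documents, strings = await tab.send(
        cdp.dom_snapshot.capture_snapshot(
            computed_styles=["display", "visibility"],
            include_dom_rects=True,
        )
    )
    doc = documents[0]
    print("document URL:", strings[int(doc.document_url)])
    print("layout rows :", len(doc.layout.node_index))


if __name__ == "__main__":
    uc.loop().run_until_complete(snapshot_example())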
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/dom_storage.html b/docs/_build/html/_modules/nodriver/cdp/dom_storage.html
new file mode 100644
index 0000000..a5d5aa8
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/dom_storage.html
@@ -0,0 +1,565 @@
Source code for nodriver.cdp.dom_storage

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: DOMStorage (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +class SerializedStorageKey(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> SerializedStorageKey: + return cls(json) + + def __repr__(self): + return "SerializedStorageKey({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class StorageId: + """ + DOM Storage identifier. + """ + + #: Whether the storage is local storage (not session storage). + is_local_storage: bool + + #: Security origin for the storage. + security_origin: typing.Optional[str] = None + + #: Represents a key by which DOM Storage keys its CachedStorageAreas + storage_key: typing.Optional[SerializedStorageKey] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["isLocalStorage"] = self.is_local_storage + if self.security_origin is not None: + json["securityOrigin"] = self.security_origin + if self.storage_key is not None: + json["storageKey"] = self.storage_key.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StorageId: + return cls( + is_local_storage=bool(json["isLocalStorage"]), + security_origin=( + str(json["securityOrigin"]) + if json.get("securityOrigin", None) is not None + else None + ), + storage_key=( + SerializedStorageKey.from_json(json["storageKey"]) + if json.get("storageKey", None) is not None + else None + ), + )
+ + + +
+[docs] +class Item(list): + """ + DOM Storage item. + """ + + def to_json(self) -> typing.List[str]: + return self + + @classmethod + def from_json(cls, json: typing.List[str]) -> Item: + return cls(json) + + def __repr__(self): + return "Item({})".format(super().__repr__())
+ + + +
+[docs] +def clear(storage_id: StorageId) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param storage_id: + """ + params: T_JSON_DICT = dict() + params["storageId"] = storage_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOMStorage.clear", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables storage tracking, prevents storage events from being sent to the client. + """ + cmd_dict: T_JSON_DICT = { + "method": "DOMStorage.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables storage tracking, storage events will now be delivered to the client. + """ + cmd_dict: T_JSON_DICT = { + "method": "DOMStorage.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_dom_storage_items( + storage_id: StorageId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Item]]: + """ + :param storage_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["storageId"] = storage_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "DOMStorage.getDOMStorageItems", + "params": params, + } + json = yield cmd_dict + return [Item.from_json(i) for i in json["entries"]]
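``get_dom_storage_items`` needs a ``StorageId``; each returned ``Item`` is a two-element ``[key, value]`` list. A sketch, again assuming a nodriver ``tab`` whose ``send(...)`` drives these generator commands (the origin string is illustrative):

from nodriver import cdp


async def dump_local_storage(tab, origin: str = "https://example.com"):
    await tab.send(cdp.dom_storage.enable())          # start storage tracking
    storage_id = cdp.dom_storage.StorageId(
        is_local_storage=True,
        security_origin=origin,
    )
    items = await tab.send(cdp.dom_storage.get_dom_storage_items(storage_id))
    for key, value in items:                          # Item is a [key, value] pair
        print(f"{key} = {value}")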
+ + + +
+[docs] +def remove_dom_storage_item( + storage_id: StorageId, key: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param storage_id: + :param key: + """ + params: T_JSON_DICT = dict() + params["storageId"] = storage_id.to_json() + params["key"] = key + cmd_dict: T_JSON_DICT = { + "method": "DOMStorage.removeDOMStorageItem", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_dom_storage_item( + storage_id: StorageId, key: str, value: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param storage_id: + :param key: + :param value: + """ + params: T_JSON_DICT = dict() + params["storageId"] = storage_id.to_json() + params["key"] = key + params["value"] = value + cmd_dict: T_JSON_DICT = { + "method": "DOMStorage.setDOMStorageItem", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("DOMStorage.domStorageItemAdded") +@dataclass +class DomStorageItemAdded: + storage_id: StorageId + key: str + new_value: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DomStorageItemAdded: + return cls( + storage_id=StorageId.from_json(json["storageId"]), + key=str(json["key"]), + new_value=str(json["newValue"]), + )
+ + + +
+[docs] +@event_class("DOMStorage.domStorageItemRemoved") +@dataclass +class DomStorageItemRemoved: + storage_id: StorageId + key: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DomStorageItemRemoved: + return cls( + storage_id=StorageId.from_json(json["storageId"]), key=str(json["key"]) + )
+ + + +
+[docs] +@event_class("DOMStorage.domStorageItemUpdated") +@dataclass +class DomStorageItemUpdated: + storage_id: StorageId + key: str + old_value: str + new_value: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DomStorageItemUpdated: + return cls( + storage_id=StorageId.from_json(json["storageId"]), + key=str(json["key"]), + old_value=str(json["oldValue"]), + new_value=str(json["newValue"]), + )
+ + + +
+[docs] +@event_class("DOMStorage.domStorageItemsCleared") +@dataclass +class DomStorageItemsCleared: + storage_id: StorageId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DomStorageItemsCleared: + return cls(storage_id=StorageId.from_json(json["storageId"]))
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/emulation.html b/docs/_build/html/_modules/nodriver/cdp/emulation.html
new file mode 100644
index 0000000..2c4c7bb
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/emulation.html
@@ -0,0 +1,1597 @@
Source code for nodriver.cdp.emulation

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Emulation
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import network
+from . import page
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +@dataclass +class ScreenOrientation: + """ + Screen orientation. + """ + + #: Orientation type. + type_: str + + #: Orientation angle. + angle: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + json["angle"] = self.angle + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScreenOrientation: + return cls( + type_=str(json["type"]), + angle=int(json["angle"]), + )
+ + + +
+[docs] +@dataclass +class DisplayFeature: + #: Orientation of a display feature in relation to screen + orientation: str + + #: The offset from the screen origin in either the x (for vertical + #: orientation) or y (for horizontal orientation) direction. + offset: int + + #: A display feature may mask content such that it is not physically + #: displayed - this length along with the offset describes this area. + #: A display feature that only splits content will have a 0 mask_length. + mask_length: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["orientation"] = self.orientation + json["offset"] = self.offset + json["maskLength"] = self.mask_length + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DisplayFeature: + return cls( + orientation=str(json["orientation"]), + offset=int(json["offset"]), + mask_length=int(json["maskLength"]), + )
+ + + +
+[docs] +@dataclass +class DevicePosture: + #: Current posture of the device + type_: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DevicePosture: + return cls( + type_=str(json["type"]), + )
+ + + +
+[docs] +@dataclass +class MediaFeature: + name: str + + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> MediaFeature: + return cls( + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +class VirtualTimePolicy(enum.Enum): + """ + advance: If the scheduler runs out of immediate work, the virtual time base may fast forward to + allow the next delayed task (if any) to run; pause: The virtual time base may not advance; + pauseIfNetworkFetchesPending: The virtual time base may not advance if there are any pending + resource fetches. + """ + + ADVANCE = "advance" + PAUSE = "pause" + PAUSE_IF_NETWORK_FETCHES_PENDING = "pauseIfNetworkFetchesPending" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> VirtualTimePolicy: + return cls(json)
+ + + +
+[docs] +@dataclass +class UserAgentBrandVersion: + """ + Used to specify User Agent Client Hints to emulate. See https://wicg.github.io/ua-client-hints + """ + + brand: str + + version: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["brand"] = self.brand + json["version"] = self.version + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> UserAgentBrandVersion: + return cls( + brand=str(json["brand"]), + version=str(json["version"]), + )
+ + + +
+[docs] +@dataclass +class UserAgentMetadata: + """ + Used to specify User Agent Client Hints to emulate. See https://wicg.github.io/ua-client-hints + Missing optional values will be filled in by the target with what it would normally use. + """ + + platform: str + + platform_version: str + + architecture: str + + model: str + + mobile: bool + + #: Brands appearing in Sec-CH-UA. + brands: typing.Optional[typing.List[UserAgentBrandVersion]] = None + + #: Brands appearing in Sec-CH-UA-Full-Version-List. + full_version_list: typing.Optional[typing.List[UserAgentBrandVersion]] = None + + full_version: typing.Optional[str] = None + + bitness: typing.Optional[str] = None + + wow64: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["platform"] = self.platform + json["platformVersion"] = self.platform_version + json["architecture"] = self.architecture + json["model"] = self.model + json["mobile"] = self.mobile + if self.brands is not None: + json["brands"] = [i.to_json() for i in self.brands] + if self.full_version_list is not None: + json["fullVersionList"] = [i.to_json() for i in self.full_version_list] + if self.full_version is not None: + json["fullVersion"] = self.full_version + if self.bitness is not None: + json["bitness"] = self.bitness + if self.wow64 is not None: + json["wow64"] = self.wow64 + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> UserAgentMetadata: + return cls( + platform=str(json["platform"]), + platform_version=str(json["platformVersion"]), + architecture=str(json["architecture"]), + model=str(json["model"]), + mobile=bool(json["mobile"]), + brands=( + [UserAgentBrandVersion.from_json(i) for i in json["brands"]] + if json.get("brands", None) is not None + else None + ), + full_version_list=( + [UserAgentBrandVersion.from_json(i) for i in json["fullVersionList"]] + if json.get("fullVersionList", None) is not None + else None + ), + full_version=( + str(json["fullVersion"]) + if json.get("fullVersion", None) is not None + else None + ), + bitness=( + str(json["bitness"]) if json.get("bitness", None) is not None else None + ), + wow64=bool(json["wow64"]) if json.get("wow64", None) is not None else None, + )
+ + + +
+[docs] +class SensorType(enum.Enum): + """ + Used to specify sensor types to emulate. + See https://w3c.github.io/sensors/#automation for more information. + """ + + ABSOLUTE_ORIENTATION = "absolute-orientation" + ACCELEROMETER = "accelerometer" + AMBIENT_LIGHT = "ambient-light" + GRAVITY = "gravity" + GYROSCOPE = "gyroscope" + LINEAR_ACCELERATION = "linear-acceleration" + MAGNETOMETER = "magnetometer" + PROXIMITY = "proximity" + RELATIVE_ORIENTATION = "relative-orientation" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SensorType: + return cls(json)
+ + + +
+[docs] +@dataclass +class SensorMetadata: + available: typing.Optional[bool] = None + + minimum_frequency: typing.Optional[float] = None + + maximum_frequency: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.available is not None: + json["available"] = self.available + if self.minimum_frequency is not None: + json["minimumFrequency"] = self.minimum_frequency + if self.maximum_frequency is not None: + json["maximumFrequency"] = self.maximum_frequency + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SensorMetadata: + return cls( + available=( + bool(json["available"]) + if json.get("available", None) is not None + else None + ), + minimum_frequency=( + float(json["minimumFrequency"]) + if json.get("minimumFrequency", None) is not None + else None + ), + maximum_frequency=( + float(json["maximumFrequency"]) + if json.get("maximumFrequency", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class SensorReadingSingle: + value: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SensorReadingSingle: + return cls( + value=float(json["value"]), + )
+ + + +
+[docs] +@dataclass +class SensorReadingXYZ: + x: float + + y: float + + z: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["x"] = self.x + json["y"] = self.y + json["z"] = self.z + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SensorReadingXYZ: + return cls( + x=float(json["x"]), + y=float(json["y"]), + z=float(json["z"]), + )
+ + + +
+[docs] +@dataclass +class SensorReadingQuaternion: + x: float + + y: float + + z: float + + w: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["x"] = self.x + json["y"] = self.y + json["z"] = self.z + json["w"] = self.w + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SensorReadingQuaternion: + return cls( + x=float(json["x"]), + y=float(json["y"]), + z=float(json["z"]), + w=float(json["w"]), + )
+ + + +
+[docs] +@dataclass +class SensorReading: + single: typing.Optional[SensorReadingSingle] = None + + xyz: typing.Optional[SensorReadingXYZ] = None + + quaternion: typing.Optional[SensorReadingQuaternion] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.single is not None: + json["single"] = self.single.to_json() + if self.xyz is not None: + json["xyz"] = self.xyz.to_json() + if self.quaternion is not None: + json["quaternion"] = self.quaternion.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SensorReading: + return cls( + single=( + SensorReadingSingle.from_json(json["single"]) + if json.get("single", None) is not None + else None + ), + xyz=( + SensorReadingXYZ.from_json(json["xyz"]) + if json.get("xyz", None) is not None + else None + ), + quaternion=( + SensorReadingQuaternion.from_json(json["quaternion"]) + if json.get("quaternion", None) is not None + else None + ), + )
+ + + +
+[docs] +class DisabledImageType(enum.Enum): + """ + Enum of image types that can be disabled. + """ + + AVIF = "avif" + WEBP = "webp" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> DisabledImageType: + return cls(json)
+ + + +
+[docs] +@deprecated(version="1.3") +def can_emulate() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, bool]: + """ + Tells whether emulation is supported. + + .. deprecated:: 1.3 + + :returns: True if emulation is supported. + """ + cmd_dict: T_JSON_DICT = { + "method": "Emulation.canEmulate", + } + json = yield cmd_dict + return bool(json["result"])
+ + + +
+[docs] +def clear_device_metrics_override() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears the overridden device metrics. + """ + cmd_dict: T_JSON_DICT = { + "method": "Emulation.clearDeviceMetricsOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_geolocation_override() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears the overridden Geolocation Position and Error. + """ + cmd_dict: T_JSON_DICT = { + "method": "Emulation.clearGeolocationOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +def reset_page_scale_factor() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Requests that page scale factor is reset to initial values. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Emulation.resetPageScaleFactor", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_focus_emulation_enabled( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables or disables simulating a focused and active page. + + **EXPERIMENTAL** + + :param enabled: Whether to enable or disable focus emulation. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setFocusEmulationEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_auto_dark_mode_override( + enabled: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Automatically render all web contents using a dark theme. + + **EXPERIMENTAL** + + :param enabled: *(Optional)* Whether to enable or disable automatic dark mode. If not specified, any existing override will be cleared. + """ + params: T_JSON_DICT = dict() + if enabled is not None: + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setAutoDarkModeOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_cpu_throttling_rate( + rate: float, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables CPU throttling to emulate slow CPUs. + + :param rate: Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc). + """ + params: T_JSON_DICT = dict() + params["rate"] = rate + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setCPUThrottlingRate", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_default_background_color_override( + color: typing.Optional[dom.RGBA] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets or clears an override of the default background color of the frame. This override is used + if the content does not specify one. + + :param color: *(Optional)* RGBA of the default background color. If not specified, any existing override will be cleared. + """ + params: T_JSON_DICT = dict() + if color is not None: + params["color"] = color.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setDefaultBackgroundColorOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_device_metrics_override( + width: int, + height: int, + device_scale_factor: float, + mobile: bool, + scale: typing.Optional[float] = None, + screen_width: typing.Optional[int] = None, + screen_height: typing.Optional[int] = None, + position_x: typing.Optional[int] = None, + position_y: typing.Optional[int] = None, + dont_set_visible_size: typing.Optional[bool] = None, + screen_orientation: typing.Optional[ScreenOrientation] = None, + viewport: typing.Optional[page.Viewport] = None, + display_feature: typing.Optional[DisplayFeature] = None, + device_posture: typing.Optional[DevicePosture] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides the values of device screen dimensions (window.screen.width, window.screen.height, + window.innerWidth, window.innerHeight, and "device-width"/"device-height"-related CSS media + query results). + + :param width: Overriding width value in pixels (minimum 0, maximum 10000000). 0 disables the override. + :param height: Overriding height value in pixels (minimum 0, maximum 10000000). 0 disables the override. + :param device_scale_factor: Overriding device scale factor value. 0 disables the override. + :param mobile: Whether to emulate mobile device. This includes viewport meta tag, overlay scrollbars, text autosizing and more. + :param scale: **(EXPERIMENTAL)** *(Optional)* Scale to apply to resulting view image. + :param screen_width: **(EXPERIMENTAL)** *(Optional)* Overriding screen width value in pixels (minimum 0, maximum 10000000). + :param screen_height: **(EXPERIMENTAL)** *(Optional)* Overriding screen height value in pixels (minimum 0, maximum 10000000). + :param position_x: **(EXPERIMENTAL)** *(Optional)* Overriding view X position on screen in pixels (minimum 0, maximum 10000000). + :param position_y: **(EXPERIMENTAL)** *(Optional)* Overriding view Y position on screen in pixels (minimum 0, maximum 10000000). + :param dont_set_visible_size: **(EXPERIMENTAL)** *(Optional)* Do not set visible view size, rely upon explicit setVisibleSize call. + :param screen_orientation: *(Optional)* Screen orientation override. + :param viewport: **(EXPERIMENTAL)** *(Optional)* If set, the visible area of the page will be overridden to this viewport. This viewport change is not observed by the page, e.g. viewport-relative elements do not change positions. + :param display_feature: **(EXPERIMENTAL)** *(Optional)* If set, the display feature of a multi-segment screen. If not set, multi-segment support is turned-off. + :param device_posture: **(DEPRECATED)** **(EXPERIMENTAL)** *(Optional)* If set, the posture of a foldable device. If not set the posture is set to continuous. Deprecated, use Emulation.setDevicePostureOverride. 
+ """ + params: T_JSON_DICT = dict() + params["width"] = width + params["height"] = height + params["deviceScaleFactor"] = device_scale_factor + params["mobile"] = mobile + if scale is not None: + params["scale"] = scale + if screen_width is not None: + params["screenWidth"] = screen_width + if screen_height is not None: + params["screenHeight"] = screen_height + if position_x is not None: + params["positionX"] = position_x + if position_y is not None: + params["positionY"] = position_y + if dont_set_visible_size is not None: + params["dontSetVisibleSize"] = dont_set_visible_size + if screen_orientation is not None: + params["screenOrientation"] = screen_orientation.to_json() + if viewport is not None: + params["viewport"] = viewport.to_json() + if display_feature is not None: + params["displayFeature"] = display_feature.to_json() + if device_posture is not None: + params["devicePosture"] = device_posture.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setDeviceMetricsOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_device_posture_override( + posture: DevicePosture, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Start reporting the given posture value to the Device Posture API. + This override can also be set in setDeviceMetricsOverride(). + + **EXPERIMENTAL** + + :param posture: + """ + params: T_JSON_DICT = dict() + params["posture"] = posture.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setDevicePostureOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_device_posture_override() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears a device posture override set with either setDeviceMetricsOverride() + or setDevicePostureOverride() and starts using posture information from the + platform again. + Does nothing if no override is set. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Emulation.clearDevicePostureOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_scrollbars_hidden( + hidden: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + + + **EXPERIMENTAL** + + :param hidden: Whether scrollbars should be always hidden. + """ + params: T_JSON_DICT = dict() + params["hidden"] = hidden + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setScrollbarsHidden", + "params": params, + } + json = yield cmd_dict
+ + + + + + + +
+[docs] +def set_emit_touch_events_for_mouse( + enabled: bool, configuration: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + + + **EXPERIMENTAL** + + :param enabled: Whether touch emulation based on mouse input should be enabled. + :param configuration: *(Optional)* Touch/gesture events configuration. Default: current platform. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + if configuration is not None: + params["configuration"] = configuration + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setEmitTouchEventsForMouse", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_emulated_media( + media: typing.Optional[str] = None, + features: typing.Optional[typing.List[MediaFeature]] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Emulates the given media type or media feature for CSS media queries. + + :param media: *(Optional)* Media type to emulate. Empty string disables the override. + :param features: *(Optional)* Media features to emulate. + """ + params: T_JSON_DICT = dict() + if media is not None: + params["media"] = media + if features is not None: + params["features"] = [i.to_json() for i in features] + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setEmulatedMedia", + "params": params, + } + json = yield cmd_dict
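``set_emulated_media`` is the usual way to flip ``prefers-color-scheme`` or similar media features for a page without touching the OS. A sketch using the ``MediaFeature`` dataclass above (the ``tab.send(...)`` plumbing is assumed as before):

from nodriver import cdp


async def force_dark_scheme(tab):
    await tab.send(cdp.emulation.set_emulated_media(
        features=[
            cdp.emulation.MediaFeature(name="prefers-color-scheme", value="dark"),
        ]
    ))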
+ + + +
+[docs] +def set_emulated_vision_deficiency( + type_: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Emulates the given vision deficiency. + + :param type_: Vision deficiency to emulate. Order: best-effort emulations come first, followed by any physiologically accurate emulations for medically recognized color vision deficiencies. + """ + params: T_JSON_DICT = dict() + params["type"] = type_ + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setEmulatedVisionDeficiency", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_geolocation_override( + latitude: typing.Optional[float] = None, + longitude: typing.Optional[float] = None, + accuracy: typing.Optional[float] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides the Geolocation Position or Error. Omitting any of the parameters emulates position + unavailable. + + :param latitude: *(Optional)* Mock latitude + :param longitude: *(Optional)* Mock longitude + :param accuracy: *(Optional)* Mock accuracy + """ + params: T_JSON_DICT = dict() + if latitude is not None: + params["latitude"] = latitude + if longitude is not None: + params["longitude"] = longitude + if accuracy is not None: + params["accuracy"] = accuracy + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setGeolocationOverride", + "params": params, + } + json = yield cmd_dict
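Because omitting the parameters emulates "position unavailable", a spoofed location passes all three values. A sketch with illustrative coordinates; note that the page still needs the geolocation permission, which is normally granted separately via the Browser domain:

from nodriver import cdp


async def spoof_location(tab):
    # roughly Amsterdam; accuracy is in meters
    await tab.send(cdp.emulation.set_geolocation_override(
        latitude=52.3702, longitude=4.8952, accuracy=10.0
    ))
    # revert with: await tab.send(cdp.emulation.clear_geolocation_override())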
+ + + +
+[docs] +def get_overridden_sensor_information( + type_: SensorType, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, float]: + """ + + + **EXPERIMENTAL** + + :param type_: + :returns: + """ + params: T_JSON_DICT = dict() + params["type"] = type_.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Emulation.getOverriddenSensorInformation", + "params": params, + } + json = yield cmd_dict + return float(json["requestedSamplingFrequency"])
+ + + +
+[docs] +def set_sensor_override_enabled( + enabled: bool, type_: SensorType, metadata: typing.Optional[SensorMetadata] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides a platform sensor of a given type. If ``enabled`` is true, calls to + Sensor.start() will use a virtual sensor as backend rather than fetching + data from a real hardware sensor. Otherwise, existing virtual + sensor-backend Sensor objects will fire an error event and new calls to + Sensor.start() will attempt to use a real sensor instead. + + **EXPERIMENTAL** + + :param enabled: + :param type_: + :param metadata: *(Optional)* + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + params["type"] = type_.to_json() + if metadata is not None: + params["metadata"] = metadata.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setSensorOverrideEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_sensor_override_readings( + type_: SensorType, reading: SensorReading +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Updates the sensor readings reported by a sensor type previously overridden + by setSensorOverrideEnabled. + + **EXPERIMENTAL** + + :param type_: + :param reading: + """ + params: T_JSON_DICT = dict() + params["type"] = type_.to_json() + params["reading"] = reading.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setSensorOverrideReadings", + "params": params, + } + json = yield cmd_dict
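Virtual sensors are enabled per ``SensorType`` and then fed readings; the reading shape (single / xyz / quaternion) has to match the sensor type. A sketch for an accelerometer with illustrative values, ``tab.send(...)`` assumed as before:

from nodriver import cdp


async def fake_accelerometer(tab):
    sensor = cdp.emulation.SensorType.ACCELEROMETER
    await tab.send(cdp.emulation.set_sensor_override_enabled(
        enabled=True, type_=sensor
    ))
    await tab.send(cdp.emulation.set_sensor_override_readings(
        type_=sensor,
        reading=cdp.emulation.SensorReading(
            xyz=cdp.emulation.SensorReadingXYZ(x=0.0, y=0.0, z=9.81)
        ),
    ))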
+ + + +
+[docs] +def set_idle_override( + is_user_active: bool, is_screen_unlocked: bool +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides the Idle state. + + :param is_user_active: Mock isUserActive + :param is_screen_unlocked: Mock isScreenUnlocked + """ + params: T_JSON_DICT = dict() + params["isUserActive"] = is_user_active + params["isScreenUnlocked"] = is_screen_unlocked + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setIdleOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_idle_override() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears Idle state overrides. + """ + cmd_dict: T_JSON_DICT = { + "method": "Emulation.clearIdleOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_navigator_overrides( + platform: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides value returned by the javascript navigator object. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param platform: The platform navigator.platform should return. + """ + params: T_JSON_DICT = dict() + params["platform"] = platform + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setNavigatorOverrides", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_page_scale_factor( + page_scale_factor: float, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets a specified page scale factor. + + **EXPERIMENTAL** + + :param page_scale_factor: Page scale factor. + """ + params: T_JSON_DICT = dict() + params["pageScaleFactor"] = page_scale_factor + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setPageScaleFactor", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_script_execution_disabled( + value: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Switches script execution in the page. + + :param value: Whether script execution should be disabled in the page. + """ + params: T_JSON_DICT = dict() + params["value"] = value + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setScriptExecutionDisabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_touch_emulation_enabled( + enabled: bool, max_touch_points: typing.Optional[int] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables touch on platforms which do not support them. + + :param enabled: Whether the touch event emulation should be enabled. + :param max_touch_points: *(Optional)* Maximum touch points supported. Defaults to one. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + if max_touch_points is not None: + params["maxTouchPoints"] = max_touch_points + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setTouchEmulationEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_virtual_time_policy( + policy: VirtualTimePolicy, + budget: typing.Optional[float] = None, + max_virtual_time_task_starvation_count: typing.Optional[int] = None, + initial_virtual_time: typing.Optional[network.TimeSinceEpoch] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, float]: + """ + Turns on virtual time for all frames (replacing real-time with a synthetic time source) and sets + the current virtual time policy. Note this supersedes any previous time budget. + + **EXPERIMENTAL** + + :param policy: + :param budget: *(Optional)* If set, after this many virtual milliseconds have elapsed virtual time will be paused and a virtualTimeBudgetExpired event is sent. + :param max_virtual_time_task_starvation_count: *(Optional)* If set, this specifies the maximum number of tasks that can be run before virtual time is forced forwards to prevent deadlock. + :param initial_virtual_time: *(Optional)* If set, base::Time::Now will be overridden to initially return this value. + :returns: Absolute timestamp at which virtual time was first enabled (up time in milliseconds). + """ + params: T_JSON_DICT = dict() + params["policy"] = policy.to_json() + if budget is not None: + params["budget"] = budget + if max_virtual_time_task_starvation_count is not None: + params["maxVirtualTimeTaskStarvationCount"] = ( + max_virtual_time_task_starvation_count + ) + if initial_virtual_time is not None: + params["initialVirtualTime"] = initial_virtual_time.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setVirtualTimePolicy", + "params": params, + } + json = yield cmd_dict + return float(json["virtualTimeTicksBase"])
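Virtual time helps make timer-heavy pages deterministic: pause the clock, then re-issue the command with a ``budget`` so the page runs exactly that many virtual milliseconds. A sketch of the command side only (waiting for the ``virtualTimeBudgetExpired`` event is left out, and the two-step pattern is one common approach rather than the only one):

from nodriver import cdp


async def fast_forward(tab, millis: float = 5000.0) -> float:
    await tab.send(cdp.emulation.set_virtual_time_policy(
        policy=cdp.emulation.VirtualTimePolicy.PAUSE
    ))
    ticks_base = await tab.send(cdp.emulation.set_virtual_time_policy(
        policy=cdp.emulation.VirtualTimePolicy.PAUSE_IF_NETWORK_FETCHES_PENDING,
        budget=millis,
    ))
    return ticks_base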
+ + + +
+[docs] +def set_locale_override( + locale: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides default host system locale with the specified one. + + **EXPERIMENTAL** + + :param locale: *(Optional)* ICU style C locale (e.g. "en_US"). If not specified or empty, disables the override and restores default host system locale. + """ + params: T_JSON_DICT = dict() + if locale is not None: + params["locale"] = locale + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setLocaleOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_timezone_override( + timezone_id: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides default host system timezone with the specified one. + + :param timezone_id: The timezone identifier. List of supported timezones: https://source.chromium.org/chromium/chromium/deps/icu.git/+/faee8bc70570192d82d2978a71e2a615788597d1:source/data/misc/metaZones.txt If empty, disables the override and restores default host system timezone. + """ + params: T_JSON_DICT = dict() + params["timezoneId"] = timezone_id + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setTimezoneOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_visible_size( + width: int, height: int +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Resizes the frame/viewport of the page. Note that this does not affect the frame's container + (e.g. browser window). Can be used to produce screenshots of the specified size. Not supported + on Android. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param width: Frame width (DIP). + :param height: Frame height (DIP). + """ + params: T_JSON_DICT = dict() + params["width"] = width + params["height"] = height + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setVisibleSize", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_disabled_image_types( + image_types: typing.List[DisabledImageType], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + + + **EXPERIMENTAL** + + :param image_types: Image types to disable. + """ + params: T_JSON_DICT = dict() + params["imageTypes"] = [i.to_json() for i in image_types] + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setDisabledImageTypes", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_hardware_concurrency_override( + hardware_concurrency: int, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + + + **EXPERIMENTAL** + + :param hardware_concurrency: Hardware concurrency to report + """ + params: T_JSON_DICT = dict() + params["hardwareConcurrency"] = hardware_concurrency + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setHardwareConcurrencyOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_user_agent_override( + user_agent: str, + accept_language: typing.Optional[str] = None, + platform: typing.Optional[str] = None, + user_agent_metadata: typing.Optional[UserAgentMetadata] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Allows overriding user agent with the given string. + ``userAgentMetadata`` must be set for Client Hint headers to be sent. + + :param user_agent: User agent to use. + :param accept_language: *(Optional)* Browser language to emulate. + :param platform: *(Optional)* The platform navigator.platform should return. + :param user_agent_metadata: **(EXPERIMENTAL)** *(Optional)* To be sent in Sec-CH-UA-* headers and returned in navigator.userAgentData + """ + params: T_JSON_DICT = dict() + params["userAgent"] = user_agent + if accept_language is not None: + params["acceptLanguage"] = accept_language + if platform is not None: + params["platform"] = platform + if user_agent_metadata is not None: + params["userAgentMetadata"] = user_agent_metadata.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setUserAgentOverride", + "params": params, + } + json = yield cmd_dict
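A hedged usage sketch: it assumes nodriver's Tab.send() accepts these generator-based commands and that uc.start(), tab.get() and uc.loop() behave as in the project's quickstart; the URL and user-agent string are placeholders.

import nodriver as uc
from nodriver import cdp

async def spoof_user_agent():
    browser = await uc.start()
    tab = await browser.get("about:blank")
    # Override UA, Accept-Language and navigator.platform for this target.
    await tab.send(cdp.emulation.set_user_agent_override(
        user_agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/126 Safari/537.36",
        accept_language="en-US,en",
        platform="Linux x86_64",
    ))
    await tab.get("https://example.com")

if __name__ == "__main__":
    uc.loop().run_until_complete(spoof_user_agent())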
+ + + +
+[docs] +def set_automation_override( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Allows overriding the automation flag. + + **EXPERIMENTAL** + + :param enabled: Whether the override should be enabled. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Emulation.setAutomationOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Emulation.virtualTimeBudgetExpired") +@dataclass +class VirtualTimeBudgetExpired: + """ + **EXPERIMENTAL** + + Notification sent after the virtual time budget for the current VirtualTimePolicy has run out. + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> VirtualTimeBudgetExpired: + return cls()
+ +
+
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/event_breakpoints.html b/docs/_build/html/_modules/nodriver/cdp/event_breakpoints.html
new file mode 100644
index 0000000..3da0f64
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/event_breakpoints.html
@@ -0,0 +1,367 @@
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+ +
+ +
+
+

Source code for nodriver.cdp.event_breakpoints

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: EventBreakpoints (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +def set_instrumentation_breakpoint( + event_name: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets breakpoint on particular native event. + + :param event_name: Instrumentation name to stop on. + """ + params: T_JSON_DICT = dict() + params["eventName"] = event_name + cmd_dict: T_JSON_DICT = { + "method": "EventBreakpoints.setInstrumentationBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def remove_instrumentation_breakpoint( + event_name: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes breakpoint on particular native event. + + :param event_name: Instrumentation name to stop on. + """ + params: T_JSON_DICT = dict() + params["eventName"] = event_name + cmd_dict: T_JSON_DICT = { + "method": "EventBreakpoints.removeInstrumentationBreakpoint", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes all breakpoints + """ + cmd_dict: T_JSON_DICT = { + "method": "EventBreakpoints.disable", + } + json = yield cmd_dict
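A hedged usage sketch for this domain: the instrumentation name is illustrative only (names mirror DevTools' "Event Listener Breakpoints" panel), and tab is assumed to be a nodriver Tab whose send() accepts these generator-based commands.

from nodriver import cdp

async def pause_on_timer_install(tab):
    # "setTimeout" is an example instrumentation name, not an exhaustive reference.
    await tab.send(cdp.event_breakpoints.set_instrumentation_breakpoint("setTimeout"))
    # ... do whatever debugging is needed while the breakpoint is armed ...
    await tab.send(cdp.event_breakpoints.remove_instrumentation_breakpoint("setTimeout"))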
+ +
+
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/extensions.html b/docs/_build/html/_modules/nodriver/cdp/extensions.html
new file mode 100644
index 0000000..bab3c37
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/extensions.html
@@ -0,0 +1,336 @@
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+ +
+ +
+
+

Source code for nodriver.cdp.extensions

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Extensions (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +def load_unpacked(path: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Installs an unpacked extension from the filesystem similar to + --load-extension CLI flags. Returns extension ID once the extension + has been installed. + + :param path: Absolute file path. + :returns: Extension id. + """ + params: T_JSON_DICT = dict() + params["path"] = path + cmd_dict: T_JSON_DICT = { + "method": "Extensions.loadUnpacked", + "params": params, + } + json = yield cmd_dict + return str(json["id"])
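A hedged sketch of calling this command: the path is a placeholder, and whether loadUnpacked must be issued on a browser-level connection rather than a page target is not covered by this module, so the choice of connection object is an assumption.

from nodriver import cdp

async def install_unpacked_extension(connection, path="/absolute/path/to/extension"):
    # loadUnpacked mirrors the --load-extension CLI flag and returns the extension id.
    extension_id = await connection.send(cdp.extensions.load_unpacked(path))
    return extension_id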
+ +
+
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/fed_cm.html b/docs/_build/html/_modules/nodriver/cdp/fed_cm.html
new file mode 100644
index 0000000..94f7b2f
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/fed_cm.html
@@ -0,0 +1,638 @@
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+ +
+ +
+
+

Source code for nodriver.cdp.fed_cm

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: FedCm (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +class LoginState(enum.Enum): + """ + Whether this is a sign-up or sign-in action for this account, i.e. + whether this account has ever been used to sign in to this RP before. + """ + + SIGN_IN = "SignIn" + SIGN_UP = "SignUp" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> LoginState: + return cls(json)
+ + + +
+[docs] +class DialogType(enum.Enum): + """ + The types of FedCM dialogs. + """ + + ACCOUNT_CHOOSER = "AccountChooser" + AUTO_REAUTHN = "AutoReauthn" + CONFIRM_IDP_LOGIN = "ConfirmIdpLogin" + ERROR = "Error" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> DialogType: + return cls(json)
+ + + +
+[docs] +class DialogButton(enum.Enum): + """ + The buttons on the FedCM dialog. + """ + + CONFIRM_IDP_LOGIN_CONTINUE = "ConfirmIdpLoginContinue" + ERROR_GOT_IT = "ErrorGotIt" + ERROR_MORE_DETAILS = "ErrorMoreDetails" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> DialogButton: + return cls(json)
+ + + +
+[docs] +class AccountUrlType(enum.Enum): + """ + The URLs that each account has + """ + + TERMS_OF_SERVICE = "TermsOfService" + PRIVACY_POLICY = "PrivacyPolicy" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AccountUrlType: + return cls(json)
+ + + +
+[docs] +@dataclass +class Account: + """ + Corresponds to IdentityRequestAccount + """ + + account_id: str + + email: str + + name: str + + given_name: str + + picture_url: str + + idp_config_url: str + + idp_login_url: str + + login_state: LoginState + + #: These two are only set if the loginState is signUp + terms_of_service_url: typing.Optional[str] = None + + privacy_policy_url: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["accountId"] = self.account_id + json["email"] = self.email + json["name"] = self.name + json["givenName"] = self.given_name + json["pictureUrl"] = self.picture_url + json["idpConfigUrl"] = self.idp_config_url + json["idpLoginUrl"] = self.idp_login_url + json["loginState"] = self.login_state.to_json() + if self.terms_of_service_url is not None: + json["termsOfServiceUrl"] = self.terms_of_service_url + if self.privacy_policy_url is not None: + json["privacyPolicyUrl"] = self.privacy_policy_url + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Account: + return cls( + account_id=str(json["accountId"]), + email=str(json["email"]), + name=str(json["name"]), + given_name=str(json["givenName"]), + picture_url=str(json["pictureUrl"]), + idp_config_url=str(json["idpConfigUrl"]), + idp_login_url=str(json["idpLoginUrl"]), + login_state=LoginState.from_json(json["loginState"]), + terms_of_service_url=( + str(json["termsOfServiceUrl"]) + if json.get("termsOfServiceUrl", None) is not None + else None + ), + privacy_policy_url=( + str(json["privacyPolicyUrl"]) + if json.get("privacyPolicyUrl", None) is not None + else None + ), + )
+ + + +
+[docs] +def enable( + disable_rejection_delay: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param disable_rejection_delay: *(Optional)* Allows callers to disable the promise rejection delay that would normally happen, if this is unimportant to what's being tested. (step 4 of https://fedidcg.github.io/FedCM/#browser-api-rp-sign-in) + """ + params: T_JSON_DICT = dict() + if disable_rejection_delay is not None: + params["disableRejectionDelay"] = disable_rejection_delay + cmd_dict: T_JSON_DICT = { + "method": "FedCm.enable", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "FedCm.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def select_account( + dialog_id: str, account_index: int +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param dialog_id: + :param account_index: + """ + params: T_JSON_DICT = dict() + params["dialogId"] = dialog_id + params["accountIndex"] = account_index + cmd_dict: T_JSON_DICT = { + "method": "FedCm.selectAccount", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def click_dialog_button( + dialog_id: str, dialog_button: DialogButton +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param dialog_id: + :param dialog_button: + """ + params: T_JSON_DICT = dict() + params["dialogId"] = dialog_id + params["dialogButton"] = dialog_button.to_json() + cmd_dict: T_JSON_DICT = { + "method": "FedCm.clickDialogButton", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def open_url( + dialog_id: str, account_index: int, account_url_type: AccountUrlType +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param dialog_id: + :param account_index: + :param account_url_type: + """ + params: T_JSON_DICT = dict() + params["dialogId"] = dialog_id + params["accountIndex"] = account_index + params["accountUrlType"] = account_url_type.to_json() + cmd_dict: T_JSON_DICT = { + "method": "FedCm.openUrl", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def dismiss_dialog( + dialog_id: str, trigger_cooldown: typing.Optional[bool] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param dialog_id: + :param trigger_cooldown: *(Optional)* + """ + params: T_JSON_DICT = dict() + params["dialogId"] = dialog_id + if trigger_cooldown is not None: + params["triggerCooldown"] = trigger_cooldown + cmd_dict: T_JSON_DICT = { + "method": "FedCm.dismissDialog", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def reset_cooldown() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Resets the cooldown time, if any, to allow the next FedCM call to show + a dialog even if one was recently dismissed by the user. + """ + cmd_dict: T_JSON_DICT = { + "method": "FedCm.resetCooldown", + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("FedCm.dialogShown") +@dataclass +class DialogShown: + dialog_id: str + dialog_type: DialogType + accounts: typing.List[Account] + #: These exist primarily so that the caller can verify the + #: RP context was used appropriately. + title: str + subtitle: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DialogShown: + return cls( + dialog_id=str(json["dialogId"]), + dialog_type=DialogType.from_json(json["dialogType"]), + accounts=[Account.from_json(i) for i in json["accounts"]], + title=str(json["title"]), + subtitle=( + str(json["subtitle"]) + if json.get("subtitle", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("FedCm.dialogClosed") +@dataclass +class DialogClosed: + """ + Triggered when a dialog is closed, either by user action, JS abort, + or a command below. + """ + + dialog_id: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DialogClosed: + return cls(dialog_id=str(json["dialogId"]))
+ +
+
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/fetch.html b/docs/_build/html/_modules/nodriver/cdp/fetch.html
new file mode 100644
index 0000000..d2d7c9b
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/fetch.html
@@ -0,0 +1,906 @@
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+ +
+ +
+
+

Source code for nodriver.cdp.fetch

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Fetch
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import io
+from . import network
+from . import page
+
+
+
+[docs] +class RequestId(str): + """ + Unique request identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> RequestId: + return cls(json) + + def __repr__(self): + return "RequestId({})".format(super().__repr__())
+ + + +
+[docs] +class RequestStage(enum.Enum): + """ + Stages of the request to handle. Request will intercept before the request is + sent. Response will intercept after the response is received (but before response + body is received). + """ + + REQUEST = "Request" + RESPONSE = "Response" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> RequestStage: + return cls(json)
+ + + +
+[docs] +@dataclass +class RequestPattern: + #: Wildcards (``'*'`` -> zero or more, ``'?'`` -> exactly one) are allowed. Escape character is + #: backslash. Omitting is equivalent to ``"*"``. + url_pattern: typing.Optional[str] = None + + #: If set, only requests for matching resource types will be intercepted. + resource_type: typing.Optional[network.ResourceType] = None + + #: Stage at which to begin intercepting requests. Default is Request. + request_stage: typing.Optional[RequestStage] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.url_pattern is not None: + json["urlPattern"] = self.url_pattern + if self.resource_type is not None: + json["resourceType"] = self.resource_type.to_json() + if self.request_stage is not None: + json["requestStage"] = self.request_stage.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RequestPattern: + return cls( + url_pattern=( + str(json["urlPattern"]) + if json.get("urlPattern", None) is not None + else None + ), + resource_type=( + network.ResourceType.from_json(json["resourceType"]) + if json.get("resourceType", None) is not None + else None + ), + request_stage=( + RequestStage.from_json(json["requestStage"]) + if json.get("requestStage", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class HeaderEntry: + """ + Response HTTP header entry + """ + + name: str + + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> HeaderEntry: + return cls( + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@dataclass +class AuthChallenge: + """ + Authorization challenge for HTTP status code 401 or 407. + """ + + #: Origin of the challenger. + origin: str + + #: The authentication scheme used, such as basic or digest + scheme: str + + #: The realm of the challenge. May be empty. + realm: str + + #: Source of the authentication challenge. + source: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["origin"] = self.origin + json["scheme"] = self.scheme + json["realm"] = self.realm + if self.source is not None: + json["source"] = self.source + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AuthChallenge: + return cls( + origin=str(json["origin"]), + scheme=str(json["scheme"]), + realm=str(json["realm"]), + source=( + str(json["source"]) if json.get("source", None) is not None else None + ), + )
+ + + +
+[docs] +@dataclass +class AuthChallengeResponse: + """ + Response to an AuthChallenge. + """ + + #: The decision on what to do in response to the authorization challenge. Default means + #: deferring to the default behavior of the net stack, which will likely either the Cancel + #: authentication or display a popup dialog box. + response: str + + #: The username to provide, possibly empty. Should only be set if response is + #: ProvideCredentials. + username: typing.Optional[str] = None + + #: The password to provide, possibly empty. Should only be set if response is + #: ProvideCredentials. + password: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["response"] = self.response + if self.username is not None: + json["username"] = self.username + if self.password is not None: + json["password"] = self.password + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AuthChallengeResponse: + return cls( + response=str(json["response"]), + username=( + str(json["username"]) + if json.get("username", None) is not None + else None + ), + password=( + str(json["password"]) + if json.get("password", None) is not None + else None + ), + )
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables the fetch domain. + """ + cmd_dict: T_JSON_DICT = { + "method": "Fetch.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable( + patterns: typing.Optional[typing.List[RequestPattern]] = None, + handle_auth_requests: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables issuing of requestPaused events. A request will be paused until client + calls one of failRequest, fulfillRequest or continueRequest/continueWithAuth. + + :param patterns: *(Optional)* If specified, only requests matching any of these patterns will produce fetchRequested event and will be paused until clients response. If not set, all requests will be affected. + :param handle_auth_requests: *(Optional)* If true, authRequired events will be issued and requests will be paused expecting a call to continueWithAuth. + """ + params: T_JSON_DICT = dict() + if patterns is not None: + params["patterns"] = [i.to_json() for i in patterns] + if handle_auth_requests is not None: + params["handleAuthRequests"] = handle_auth_requests + cmd_dict: T_JSON_DICT = { + "method": "Fetch.enable", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def fail_request( + request_id: RequestId, error_reason: network.ErrorReason +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Causes the request to fail with specified reason. + + :param request_id: An id the client received in requestPaused event. + :param error_reason: Causes the request to fail with the given reason. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + params["errorReason"] = error_reason.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Fetch.failRequest", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def fulfill_request( + request_id: RequestId, + response_code: int, + response_headers: typing.Optional[typing.List[HeaderEntry]] = None, + binary_response_headers: typing.Optional[str] = None, + body: typing.Optional[str] = None, + response_phrase: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + r""" + Provides response to the request. + + :param request_id: An id the client received in requestPaused event. + :param response_code: An HTTP response code. + :param response_headers: *(Optional)* Response headers. + :param binary_response_headers: *(Optional)* Alternative way of specifying response headers as a \0-separated series of name: value pairs. Prefer the above method unless you need to represent some non-UTF8 values that can't be transmitted over the protocol as text. (Encoded as a base64 string when passed over JSON) + :param body: *(Optional)* A response body. If absent, original response body will be used if the request is intercepted at the response stage and empty body will be used if the request is intercepted at the request stage. (Encoded as a base64 string when passed over JSON) + :param response_phrase: *(Optional)* A textual representation of responseCode. If absent, a standard phrase matching responseCode is used. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + params["responseCode"] = response_code + if response_headers is not None: + params["responseHeaders"] = [i.to_json() for i in response_headers] + if binary_response_headers is not None: + params["binaryResponseHeaders"] = binary_response_headers + if body is not None: + params["body"] = body + if response_phrase is not None: + params["responsePhrase"] = response_phrase + cmd_dict: T_JSON_DICT = { + "method": "Fetch.fulfillRequest", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def continue_request( + request_id: RequestId, + url: typing.Optional[str] = None, + method: typing.Optional[str] = None, + post_data: typing.Optional[str] = None, + headers: typing.Optional[typing.List[HeaderEntry]] = None, + intercept_response: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Continues the request, optionally modifying some of its parameters. + + :param request_id: An id the client received in requestPaused event. + :param url: *(Optional)* If set, the request url will be modified in a way that's not observable by page. + :param method: *(Optional)* If set, the request method is overridden. + :param post_data: *(Optional)* If set, overrides the post data in the request. (Encoded as a base64 string when passed over JSON) + :param headers: *(Optional)* If set, overrides the request headers. Note that the overrides do not extend to subsequent redirect hops, if a redirect happens. Another override may be applied to a different request produced by a redirect. + :param intercept_response: **(EXPERIMENTAL)** *(Optional)* If set, overrides response interception behavior for this request. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + if url is not None: + params["url"] = url + if method is not None: + params["method"] = method + if post_data is not None: + params["postData"] = post_data + if headers is not None: + params["headers"] = [i.to_json() for i in headers] + if intercept_response is not None: + params["interceptResponse"] = intercept_response + cmd_dict: T_JSON_DICT = { + "method": "Fetch.continueRequest", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def continue_with_auth( + request_id: RequestId, auth_challenge_response: AuthChallengeResponse +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Continues a request supplying authChallengeResponse following authRequired event. + + :param request_id: An id the client received in authRequired event. + :param auth_challenge_response: Response to with an authChallenge. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + params["authChallengeResponse"] = auth_challenge_response.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Fetch.continueWithAuth", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def continue_response( + request_id: RequestId, + response_code: typing.Optional[int] = None, + response_phrase: typing.Optional[str] = None, + response_headers: typing.Optional[typing.List[HeaderEntry]] = None, + binary_response_headers: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + r""" + Continues loading of the paused response, optionally modifying the + response headers. If either responseCode or headers are modified, all of them + must be present. + + **EXPERIMENTAL** + + :param request_id: An id the client received in requestPaused event. + :param response_code: *(Optional)* An HTTP response code. If absent, original response code will be used. + :param response_phrase: *(Optional)* A textual representation of responseCode. If absent, a standard phrase matching responseCode is used. + :param response_headers: *(Optional)* Response headers. If absent, original response headers will be used. + :param binary_response_headers: *(Optional)* Alternative way of specifying response headers as a \0-separated series of name: value pairs. Prefer the above method unless you need to represent some non-UTF8 values that can't be transmitted over the protocol as text. (Encoded as a base64 string when passed over JSON) + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + if response_code is not None: + params["responseCode"] = response_code + if response_phrase is not None: + params["responsePhrase"] = response_phrase + if response_headers is not None: + params["responseHeaders"] = [i.to_json() for i in response_headers] + if binary_response_headers is not None: + params["binaryResponseHeaders"] = binary_response_headers + cmd_dict: T_JSON_DICT = { + "method": "Fetch.continueResponse", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_response_body( + request_id: RequestId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[str, bool]]: + """ + Causes the body of the response to be received from the server and + returned as a single string. May only be issued for a request that + is paused in the Response stage and is mutually exclusive with + takeResponseBodyForInterceptionAsStream. Calling other methods that + affect the request or disabling fetch domain before body is received + results in an undefined behavior. + Note that the response body is not available for redirects. Requests + paused in the _redirect received_ state may be differentiated by + ``responseCode`` and presence of ``location`` response header, see + comments to ``requestPaused`` for details. + + :param request_id: Identifier for the intercepted request to get body for. + :returns: A tuple with the following items: + + 0. **body** - Response body. + 1. **base64Encoded** - True, if content was sent as base64. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Fetch.getResponseBody", + "params": params, + } + json = yield cmd_dict + return (str(json["body"]), bool(json["base64Encoded"]))
+ + + +
+[docs] +def take_response_body_as_stream( + request_id: RequestId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, io.StreamHandle]: + """ + Returns a handle to the stream representing the response body. + The request must be paused in the HeadersReceived stage. + Note that after this command the request can't be continued + as is -- client either needs to cancel it or to provide the + response body. + The stream only supports sequential read, IO.read will fail if the position + is specified. + This method is mutually exclusive with getResponseBody. + Calling other methods that affect the request or disabling fetch + domain before body is received results in an undefined behavior. + + :param request_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Fetch.takeResponseBodyAsStream", + "params": params, + } + json = yield cmd_dict + return io.StreamHandle.from_json(json["stream"])
+ + + +
+[docs] +@event_class("Fetch.requestPaused") +@dataclass +class RequestPaused: + """ + Issued when the domain is enabled and the request URL matches the + specified filter. The request is paused until the client responds + with one of continueRequest, failRequest or fulfillRequest. + The stage of the request can be determined by presence of responseErrorReason + and responseStatusCode -- the request is at the response stage if either + of these fields is present and in the request stage otherwise. + Redirect responses and subsequent requests are reported similarly to regular + responses and requests. Redirect responses may be distinguished by the value + of ``responseStatusCode`` (which is one of 301, 302, 303, 307, 308) along with + presence of the ``location`` header. Requests resulting from a redirect will + have ``redirectedRequestId`` field set. + """ + + #: Each request the page makes will have a unique id. + request_id: RequestId + #: The details of the request. + request: network.Request + #: The id of the frame that initiated the request. + frame_id: page.FrameId + #: How the requested resource will be used. + resource_type: network.ResourceType + #: Response error if intercepted at response stage. + response_error_reason: typing.Optional[network.ErrorReason] + #: Response code if intercepted at response stage. + response_status_code: typing.Optional[int] + #: Response status text if intercepted at response stage. + response_status_text: typing.Optional[str] + #: Response headers if intercepted at the response stage. + response_headers: typing.Optional[typing.List[HeaderEntry]] + #: If the intercepted request had a corresponding Network.requestWillBeSent event fired for it, + #: then this networkId will be the same as the requestId present in the requestWillBeSent event. + network_id: typing.Optional[network.RequestId] + #: If the request is due to a redirect response from the server, the id of the request that + #: has caused the redirect. + redirected_request_id: typing.Optional[RequestId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RequestPaused: + return cls( + request_id=RequestId.from_json(json["requestId"]), + request=network.Request.from_json(json["request"]), + frame_id=page.FrameId.from_json(json["frameId"]), + resource_type=network.ResourceType.from_json(json["resourceType"]), + response_error_reason=( + network.ErrorReason.from_json(json["responseErrorReason"]) + if json.get("responseErrorReason", None) is not None + else None + ), + response_status_code=( + int(json["responseStatusCode"]) + if json.get("responseStatusCode", None) is not None + else None + ), + response_status_text=( + str(json["responseStatusText"]) + if json.get("responseStatusText", None) is not None + else None + ), + response_headers=( + [HeaderEntry.from_json(i) for i in json["responseHeaders"]] + if json.get("responseHeaders", None) is not None + else None + ), + network_id=( + network.RequestId.from_json(json["networkId"]) + if json.get("networkId", None) is not None + else None + ), + redirected_request_id=( + RequestId.from_json(json["redirectedRequestId"]) + if json.get("redirectedRequestId", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Fetch.authRequired") +@dataclass +class AuthRequired: + """ + Issued when the domain is enabled with handleAuthRequests set to true. + The request is paused until client responds with continueWithAuth. + """ + + #: Each request the page makes will have a unique id. + request_id: RequestId + #: The details of the request. + request: network.Request + #: The id of the frame that initiated the request. + frame_id: page.FrameId + #: How the requested resource will be used. + resource_type: network.ResourceType + #: Details of the Authorization Challenge encountered. + #: If this is set, client should respond with continueRequest that + #: contains AuthChallengeResponse. + auth_challenge: AuthChallenge + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AuthRequired: + return cls( + request_id=RequestId.from_json(json["requestId"]), + request=network.Request.from_json(json["request"]), + frame_id=page.FrameId.from_json(json["frameId"]), + resource_type=network.ResourceType.from_json(json["resourceType"]), + auth_challenge=AuthChallenge.from_json(json["authChallenge"]), + )
+ +
+
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/headless_experimental.html b/docs/_build/html/_modules/nodriver/cdp/headless_experimental.html
new file mode 100644
index 0000000..3ee9e90
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/headless_experimental.html
@@ -0,0 +1,444 @@
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+ +
+ +
+
+

Source code for nodriver.cdp.headless_experimental

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: HeadlessExperimental (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +@dataclass +class ScreenshotParams: + """ + Encoding options for a screenshot. + """ + + #: Image compression format (defaults to png). + format_: typing.Optional[str] = None + + #: Compression quality from range [0..100] (jpeg and webp only). + quality: typing.Optional[int] = None + + #: Optimize image encoding for speed, not for resulting size (defaults to false) + optimize_for_speed: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.format_ is not None: + json["format"] = self.format_ + if self.quality is not None: + json["quality"] = self.quality + if self.optimize_for_speed is not None: + json["optimizeForSpeed"] = self.optimize_for_speed + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScreenshotParams: + return cls( + format_=( + str(json["format"]) if json.get("format", None) is not None else None + ), + quality=( + int(json["quality"]) if json.get("quality", None) is not None else None + ), + optimize_for_speed=( + bool(json["optimizeForSpeed"]) + if json.get("optimizeForSpeed", None) is not None + else None + ), + )
+ + + +
+[docs] +def begin_frame( + frame_time_ticks: typing.Optional[float] = None, + interval: typing.Optional[float] = None, + no_display_updates: typing.Optional[bool] = None, + screenshot: typing.Optional[ScreenshotParams] = None, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[bool, typing.Optional[str]] +]: + """ + Sends a BeginFrame to the target and returns when the frame was completed. Optionally captures a + screenshot from the resulting frame. Requires that the target was created with enabled + BeginFrameControl. Designed for use with --run-all-compositor-stages-before-draw, see also + https://goo.gle/chrome-headless-rendering for more background. + + :param frame_time_ticks: *(Optional)* Timestamp of this BeginFrame in Renderer TimeTicks (milliseconds of uptime). If not set, the current time will be used. + :param interval: *(Optional)* The interval between BeginFrames that is reported to the compositor, in milliseconds. Defaults to a 60 frames/second interval, i.e. about 16.666 milliseconds. + :param no_display_updates: *(Optional)* Whether updates should not be committed and drawn onto the display. False by default. If true, only side effects of the BeginFrame will be run, such as layout and animations, but any visual updates may not be visible on the display or in screenshots. + :param screenshot: *(Optional)* If set, a screenshot of the frame will be captured and returned in the response. Otherwise, no screenshot will be captured. Note that capturing a screenshot can fail, for example, during renderer initialization. In such a case, no screenshot data will be returned. + :returns: A tuple with the following items: + + 0. **hasDamage** - Whether the BeginFrame resulted in damage and, thus, a new frame was committed to the display. Reported for diagnostic uses, may be removed in the future. + 1. **screenshotData** - *(Optional)* Base64-encoded image data of the screenshot, if one was requested and successfully taken. (Encoded as a base64 string when passed over JSON) + """ + params: T_JSON_DICT = dict() + if frame_time_ticks is not None: + params["frameTimeTicks"] = frame_time_ticks + if interval is not None: + params["interval"] = interval + if no_display_updates is not None: + params["noDisplayUpdates"] = no_display_updates + if screenshot is not None: + params["screenshot"] = screenshot.to_json() + cmd_dict: T_JSON_DICT = { + "method": "HeadlessExperimental.beginFrame", + "params": params, + } + json = yield cmd_dict + return ( + bool(json["hasDamage"]), + ( + str(json["screenshotData"]) + if json.get("screenshotData", None) is not None + else None + ), + )
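A hedged sketch of driving a single frame and saving the optional screenshot. This is only meaningful when the target was created with BeginFrameControl enabled, as the docstring above notes; Tab.send returning the command's parsed tuple is an assumption.

import base64
from nodriver import cdp

async def capture_one_frame(tab, out_path="frame.png"):
    has_damage, screenshot_b64 = await tab.send(cdp.headless_experimental.begin_frame(
        screenshot=cdp.headless_experimental.ScreenshotParams(format_="png"),
    ))
    if screenshot_b64:
        with open(out_path, "wb") as fh:
            fh.write(base64.b64decode(screenshot_b64))
    return has_damage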
+ + + +
+[docs] +@deprecated(version="1.3") +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables headless events for the target. + + .. deprecated:: 1.3 + """ + cmd_dict: T_JSON_DICT = { + "method": "HeadlessExperimental.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables headless events for the target. + + .. deprecated:: 1.3 + """ + cmd_dict: T_JSON_DICT = { + "method": "HeadlessExperimental.enable", + } + json = yield cmd_dict
+ +
+
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/heap_profiler.html b/docs/_build/html/_modules/nodriver/cdp/heap_profiler.html
new file mode 100644
index 0000000..d321940
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/heap_profiler.html
@@ -0,0 +1,770 @@
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+ +
+ +
+
+

Source code for nodriver.cdp.heap_profiler

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: HeapProfiler (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import runtime
+
+
+
+[docs] +class HeapSnapshotObjectId(str): + """ + Heap snapshot object id. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> HeapSnapshotObjectId: + return cls(json) + + def __repr__(self): + return "HeapSnapshotObjectId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class SamplingHeapProfileNode: + """ + Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes. + """ + + #: Function location. + call_frame: runtime.CallFrame + + #: Allocations size in bytes for the node excluding children. + self_size: float + + #: Node id. Ids are unique across all profiles collected between startSampling and stopSampling. + id_: int + + #: Child nodes. + children: typing.List[SamplingHeapProfileNode] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["callFrame"] = self.call_frame.to_json() + json["selfSize"] = self.self_size + json["id"] = self.id_ + json["children"] = [i.to_json() for i in self.children] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SamplingHeapProfileNode: + return cls( + call_frame=runtime.CallFrame.from_json(json["callFrame"]), + self_size=float(json["selfSize"]), + id_=int(json["id"]), + children=[SamplingHeapProfileNode.from_json(i) for i in json["children"]], + )
+ + + +
+[docs] +@dataclass +class SamplingHeapProfileSample: + """ + A single sample from a sampling profile. + """ + + #: Allocation size in bytes attributed to the sample. + size: float + + #: Id of the corresponding profile tree node. + node_id: int + + #: Time-ordered sample ordinal number. It is unique across all profiles retrieved + #: between startSampling and stopSampling. + ordinal: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["size"] = self.size + json["nodeId"] = self.node_id + json["ordinal"] = self.ordinal + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SamplingHeapProfileSample: + return cls( + size=float(json["size"]), + node_id=int(json["nodeId"]), + ordinal=float(json["ordinal"]), + )
+ + + +
+[docs] +@dataclass +class SamplingHeapProfile: + """ + Sampling profile. + """ + + head: SamplingHeapProfileNode + + samples: typing.List[SamplingHeapProfileSample] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["head"] = self.head.to_json() + json["samples"] = [i.to_json() for i in self.samples] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SamplingHeapProfile: + return cls( + head=SamplingHeapProfileNode.from_json(json["head"]), + samples=[SamplingHeapProfileSample.from_json(i) for i in json["samples"]], + )
+ + + +
+[docs] +def add_inspected_heap_object( + heap_object_id: HeapSnapshotObjectId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables console to refer to the node with given id via $x (see Command Line API for more details + $x functions). + + :param heap_object_id: Heap snapshot object id to be accessible by means of $x command line API. + """ + params: T_JSON_DICT = dict() + params["heapObjectId"] = heap_object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.addInspectedHeapObject", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def collect_garbage() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.collectGarbage", + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_heap_object_id( + object_id: runtime.RemoteObjectId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, HeapSnapshotObjectId]: + """ + :param object_id: Identifier of the object to get heap object id for. + :returns: Id of the heap snapshot object corresponding to the passed remote object id. + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.getHeapObjectId", + "params": params, + } + json = yield cmd_dict + return HeapSnapshotObjectId.from_json(json["heapSnapshotObjectId"])
+ + + +
+[docs] +def get_object_by_heap_object_id( + object_id: HeapSnapshotObjectId, object_group: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, runtime.RemoteObject]: + """ + :param object_id: + :param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects. + :returns: Evaluation result. + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + if object_group is not None: + params["objectGroup"] = object_group + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.getObjectByHeapObjectId", + "params": params, + } + json = yield cmd_dict + return runtime.RemoteObject.from_json(json["result"])
+ + + +
+[docs] +def get_sampling_profile() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, SamplingHeapProfile] +): + """ + + + :returns: Return the sampling profile being collected. + """ + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.getSamplingProfile", + } + json = yield cmd_dict + return SamplingHeapProfile.from_json(json["profile"])
+ + + +
+[docs] +def start_sampling( + sampling_interval: typing.Optional[float] = None, + include_objects_collected_by_major_gc: typing.Optional[bool] = None, + include_objects_collected_by_minor_gc: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param sampling_interval: *(Optional)* Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes. + :param include_objects_collected_by_major_gc: *(Optional)* By default, the sampling heap profiler reports only objects which are still alive when the profile is returned via getSamplingProfile or stopSampling, which is useful for determining what functions contribute the most to steady-state memory usage. This flag instructs the sampling heap profiler to also include information about objects discarded by major GC, which will show which functions cause large temporary memory usage or long GC pauses. + :param include_objects_collected_by_minor_gc: *(Optional)* By default, the sampling heap profiler reports only objects which are still alive when the profile is returned via getSamplingProfile or stopSampling, which is useful for determining what functions contribute the most to steady-state memory usage. This flag instructs the sampling heap profiler to also include information about objects discarded by minor GC, which is useful when tuning a latency-sensitive application for minimal GC activity. + """ + params: T_JSON_DICT = dict() + if sampling_interval is not None: + params["samplingInterval"] = sampling_interval + if include_objects_collected_by_major_gc is not None: + params["includeObjectsCollectedByMajorGC"] = ( + include_objects_collected_by_major_gc + ) + if include_objects_collected_by_minor_gc is not None: + params["includeObjectsCollectedByMinorGC"] = ( + include_objects_collected_by_minor_gc + ) + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.startSampling", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def start_tracking_heap_objects( + track_allocations: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param track_allocations: *(Optional)* + """ + params: T_JSON_DICT = dict() + if track_allocations is not None: + params["trackAllocations"] = track_allocations + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.startTrackingHeapObjects", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_sampling() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SamplingHeapProfile]: + """ + + + :returns: Recorded sampling heap profile. + """ + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.stopSampling", + } + json = yield cmd_dict + return SamplingHeapProfile.from_json(json["profile"])
+ + + +
+[docs] +def stop_tracking_heap_objects( + report_progress: typing.Optional[bool] = None, + treat_global_objects_as_roots: typing.Optional[bool] = None, + capture_numeric_value: typing.Optional[bool] = None, + expose_internals: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param report_progress: *(Optional)* If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped. + :param treat_global_objects_as_roots: **(DEPRECATED)** *(Optional)* Deprecated in favor of ```exposeInternals```. + :param capture_numeric_value: *(Optional)* If true, numerical values are included in the snapshot + :param expose_internals: **(EXPERIMENTAL)** *(Optional)* If true, exposes internals of the snapshot. + """ + params: T_JSON_DICT = dict() + if report_progress is not None: + params["reportProgress"] = report_progress + if treat_global_objects_as_roots is not None: + params["treatGlobalObjectsAsRoots"] = treat_global_objects_as_roots + if capture_numeric_value is not None: + params["captureNumericValue"] = capture_numeric_value + if expose_internals is not None: + params["exposeInternals"] = expose_internals + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.stopTrackingHeapObjects", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def take_heap_snapshot( + report_progress: typing.Optional[bool] = None, + treat_global_objects_as_roots: typing.Optional[bool] = None, + capture_numeric_value: typing.Optional[bool] = None, + expose_internals: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param report_progress: *(Optional)* If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken. + :param treat_global_objects_as_roots: **(DEPRECATED)** *(Optional)* If true, a raw snapshot without artificial roots will be generated. Deprecated in favor of ```exposeInternals```. + :param capture_numeric_value: *(Optional)* If true, numerical values are included in the snapshot + :param expose_internals: **(EXPERIMENTAL)** *(Optional)* If true, exposes internals of the snapshot. + """ + params: T_JSON_DICT = dict() + if report_progress is not None: + params["reportProgress"] = report_progress + if treat_global_objects_as_roots is not None: + params["treatGlobalObjectsAsRoots"] = treat_global_objects_as_roots + if capture_numeric_value is not None: + params["captureNumericValue"] = capture_numeric_value + if expose_internals is not None: + params["exposeInternals"] = expose_internals + cmd_dict: T_JSON_DICT = { + "method": "HeapProfiler.takeHeapSnapshot", + "params": params, + } + json = yield cmd_dict
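A hedged sketch that collects a full heap snapshot by buffering addHeapSnapshotChunk events around takeHeapSnapshot. nodriver's add_handler and Tab.send are assumptions, as is the assumption that all chunks have been delivered by the time the command returns.

from nodriver import cdp

async def dump_heap_snapshot(tab, out_path="page.heapsnapshot"):
    chunks = []

    async def on_chunk(event: cdp.heap_profiler.AddHeapSnapshotChunk):
        chunks.append(event.chunk)

    tab.add_handler(cdp.heap_profiler.AddHeapSnapshotChunk, on_chunk)
    await tab.send(cdp.heap_profiler.enable())
    await tab.send(cdp.heap_profiler.take_heap_snapshot(report_progress=False))
    with open(out_path, "w") as fh:
        fh.write("".join(chunks))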
+ + + +
+[docs] +@event_class("HeapProfiler.addHeapSnapshotChunk") +@dataclass +class AddHeapSnapshotChunk: + chunk: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AddHeapSnapshotChunk: + return cls(chunk=str(json["chunk"]))
+ + + +
+[docs] +@event_class("HeapProfiler.heapStatsUpdate") +@dataclass +class HeapStatsUpdate: + """ + If heap objects tracking has been started then backend may send update for one or more fragments + """ + + #: An array of triplets. Each triplet describes a fragment. The first integer is the fragment + #: index, the second integer is a total count of objects for the fragment, the third integer is + #: a total size of the objects for the fragment. + stats_update: typing.List[int] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> HeapStatsUpdate: + return cls(stats_update=[int(i) for i in json["statsUpdate"]])
+ + + +
+[docs] +@event_class("HeapProfiler.lastSeenObjectId") +@dataclass +class LastSeenObjectId: + """ + If heap objects tracking has been started then backend regularly sends a current value for last + seen object id and corresponding timestamp. If the were changes in the heap since last event + then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event. + """ + + last_seen_object_id: int + timestamp: float + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LastSeenObjectId: + return cls( + last_seen_object_id=int(json["lastSeenObjectId"]), + timestamp=float(json["timestamp"]), + )
+ + + +
+[docs] +@event_class("HeapProfiler.reportHeapSnapshotProgress") +@dataclass +class ReportHeapSnapshotProgress: + done: int + total: int + finished: typing.Optional[bool] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ReportHeapSnapshotProgress: + return cls( + done=int(json["done"]), + total=int(json["total"]), + finished=( + bool(json["finished"]) + if json.get("finished", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("HeapProfiler.resetProfiles") +@dataclass +class ResetProfiles: + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ResetProfiles: + return cls()
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/indexed_db.html b/docs/_build/html/_modules/nodriver/cdp/indexed_db.html
new file mode 100644
index 0000000..024d7a2
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/indexed_db.html
@@ -0,0 +1,905 @@

Source code for nodriver.cdp.indexed_db

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: IndexedDB (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import runtime
+from . import storage
+
+
+
+[docs] +@dataclass +class DatabaseWithObjectStores: + """ + Database with an array of object stores. + """ + + #: Database name. + name: str + + #: Database version (type is not 'integer', as the standard + #: requires the version number to be 'unsigned long long') + version: float + + #: Object stores in this database. + object_stores: typing.List[ObjectStore] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["version"] = self.version + json["objectStores"] = [i.to_json() for i in self.object_stores] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DatabaseWithObjectStores: + return cls( + name=str(json["name"]), + version=float(json["version"]), + object_stores=[ObjectStore.from_json(i) for i in json["objectStores"]], + )
+ + + +
+[docs] +@dataclass +class ObjectStore: + """ + Object store. + """ + + #: Object store name. + name: str + + #: Object store key path. + key_path: KeyPath + + #: If true, object store has auto increment flag set. + auto_increment: bool + + #: Indexes in this object store. + indexes: typing.List[ObjectStoreIndex] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["keyPath"] = self.key_path.to_json() + json["autoIncrement"] = self.auto_increment + json["indexes"] = [i.to_json() for i in self.indexes] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ObjectStore: + return cls( + name=str(json["name"]), + key_path=KeyPath.from_json(json["keyPath"]), + auto_increment=bool(json["autoIncrement"]), + indexes=[ObjectStoreIndex.from_json(i) for i in json["indexes"]], + )
+ + + +
+[docs] +@dataclass +class ObjectStoreIndex: + """ + Object store index. + """ + + #: Index name. + name: str + + #: Index key path. + key_path: KeyPath + + #: If true, index is unique. + unique: bool + + #: If true, index allows multiple entries for a key. + multi_entry: bool + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["keyPath"] = self.key_path.to_json() + json["unique"] = self.unique + json["multiEntry"] = self.multi_entry + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ObjectStoreIndex: + return cls( + name=str(json["name"]), + key_path=KeyPath.from_json(json["keyPath"]), + unique=bool(json["unique"]), + multi_entry=bool(json["multiEntry"]), + )
+ + + +
+[docs] +@dataclass +class Key: + """ + Key. + """ + + #: Key type. + type_: str + + #: Number value. + number: typing.Optional[float] = None + + #: String value. + string: typing.Optional[str] = None + + #: Date value. + date: typing.Optional[float] = None + + #: Array value. + array: typing.Optional[typing.List[Key]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + if self.number is not None: + json["number"] = self.number + if self.string is not None: + json["string"] = self.string + if self.date is not None: + json["date"] = self.date + if self.array is not None: + json["array"] = [i.to_json() for i in self.array] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Key: + return cls( + type_=str(json["type"]), + number=( + float(json["number"]) if json.get("number", None) is not None else None + ), + string=( + str(json["string"]) if json.get("string", None) is not None else None + ), + date=float(json["date"]) if json.get("date", None) is not None else None, + array=( + [Key.from_json(i) for i in json["array"]] + if json.get("array", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class KeyRange: + """ + Key range. + """ + + #: If true lower bound is open. + lower_open: bool + + #: If true upper bound is open. + upper_open: bool + + #: Lower bound. + lower: typing.Optional[Key] = None + + #: Upper bound. + upper: typing.Optional[Key] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["lowerOpen"] = self.lower_open + json["upperOpen"] = self.upper_open + if self.lower is not None: + json["lower"] = self.lower.to_json() + if self.upper is not None: + json["upper"] = self.upper.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> KeyRange: + return cls( + lower_open=bool(json["lowerOpen"]), + upper_open=bool(json["upperOpen"]), + lower=( + Key.from_json(json["lower"]) + if json.get("lower", None) is not None + else None + ), + upper=( + Key.from_json(json["upper"]) + if json.get("upper", None) is not None + else None + ), + )
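A quick sketch of how these value objects compose, using only the dataclasses defined in this module: build a `KeyRange` covering numeric keys 10 (inclusive) through 20 (exclusive), ready to be passed to the deletion and query commands further down.

key_range = KeyRange(
    lower_open=False,
    upper_open=True,
    lower=Key(type_="number", number=10),
    upper=Key(type_="number", number=20),
)
# to_json() produces the wire format expected by IndexedDB.deleteObjectStoreEntries
# and IndexedDB.requestData below.
print(key_range.to_json())
# {'lowerOpen': False, 'upperOpen': True,
#  'lower': {'type': 'number', 'number': 10},
#  'upper': {'type': 'number', 'number': 20}}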
+ + + +
+[docs] +@dataclass +class DataEntry: + """ + Data entry. + """ + + #: Key object. + key: runtime.RemoteObject + + #: Primary key object. + primary_key: runtime.RemoteObject + + #: Value object. + value: runtime.RemoteObject + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["key"] = self.key.to_json() + json["primaryKey"] = self.primary_key.to_json() + json["value"] = self.value.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DataEntry: + return cls( + key=runtime.RemoteObject.from_json(json["key"]), + primary_key=runtime.RemoteObject.from_json(json["primaryKey"]), + value=runtime.RemoteObject.from_json(json["value"]), + )
+ + + +
+[docs] +@dataclass +class KeyPath: + """ + Key path. + """ + + #: Key path type. + type_: str + + #: String value. + string: typing.Optional[str] = None + + #: Array value. + array: typing.Optional[typing.List[str]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + if self.string is not None: + json["string"] = self.string + if self.array is not None: + json["array"] = [i for i in self.array] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> KeyPath: + return cls( + type_=str(json["type"]), + string=( + str(json["string"]) if json.get("string", None) is not None else None + ), + array=( + [str(i) for i in json["array"]] + if json.get("array", None) is not None + else None + ), + )
+ + + +
+[docs] +def clear_object_store( + database_name: str, + object_store_name: str, + security_origin: typing.Optional[str] = None, + storage_key: typing.Optional[str] = None, + storage_bucket: typing.Optional[storage.StorageBucket] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears all entries from an object store. + + :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin. + :param storage_key: *(Optional)* Storage key. + :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket. + :param database_name: Database name. + :param object_store_name: Object store name. + """ + params: T_JSON_DICT = dict() + if security_origin is not None: + params["securityOrigin"] = security_origin + if storage_key is not None: + params["storageKey"] = storage_key + if storage_bucket is not None: + params["storageBucket"] = storage_bucket.to_json() + params["databaseName"] = database_name + params["objectStoreName"] = object_store_name + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.clearObjectStore", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def delete_database( + database_name: str, + security_origin: typing.Optional[str] = None, + storage_key: typing.Optional[str] = None, + storage_bucket: typing.Optional[storage.StorageBucket] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Deletes a database. + + :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin. + :param storage_key: *(Optional)* Storage key. + :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket. + :param database_name: Database name. + """ + params: T_JSON_DICT = dict() + if security_origin is not None: + params["securityOrigin"] = security_origin + if storage_key is not None: + params["storageKey"] = storage_key + if storage_bucket is not None: + params["storageBucket"] = storage_bucket.to_json() + params["databaseName"] = database_name + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.deleteDatabase", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def delete_object_store_entries( + database_name: str, + object_store_name: str, + key_range: KeyRange, + security_origin: typing.Optional[str] = None, + storage_key: typing.Optional[str] = None, + storage_bucket: typing.Optional[storage.StorageBucket] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Delete a range of entries from an object store + + :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin. + :param storage_key: *(Optional)* Storage key. + :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket. + :param database_name: + :param object_store_name: + :param key_range: Range of entry keys to delete + """ + params: T_JSON_DICT = dict() + if security_origin is not None: + params["securityOrigin"] = security_origin + if storage_key is not None: + params["storageKey"] = storage_key + if storage_bucket is not None: + params["storageBucket"] = storage_bucket.to_json() + params["databaseName"] = database_name + params["objectStoreName"] = object_store_name + params["keyRange"] = key_range.to_json() + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.deleteObjectStoreEntries", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables events from backend. + """ + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables events from backend. + """ + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def request_data( + database_name: str, + object_store_name: str, + index_name: str, + skip_count: int, + page_size: int, + security_origin: typing.Optional[str] = None, + storage_key: typing.Optional[str] = None, + storage_bucket: typing.Optional[storage.StorageBucket] = None, + key_range: typing.Optional[KeyRange] = None, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[typing.List[DataEntry], bool] +]: + """ + Requests data from object store or index. + + :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin. + :param storage_key: *(Optional)* Storage key. + :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket. + :param database_name: Database name. + :param object_store_name: Object store name. + :param index_name: Index name, empty string for object store data requests. + :param skip_count: Number of records to skip. + :param page_size: Number of records to fetch. + :param key_range: *(Optional)* Key range. + :returns: A tuple with the following items: + + 0. **objectStoreDataEntries** - Array of object store data entries. + 1. **hasMore** - If true, there are more entries to fetch in the given range. + """ + params: T_JSON_DICT = dict() + if security_origin is not None: + params["securityOrigin"] = security_origin + if storage_key is not None: + params["storageKey"] = storage_key + if storage_bucket is not None: + params["storageBucket"] = storage_bucket.to_json() + params["databaseName"] = database_name + params["objectStoreName"] = object_store_name + params["indexName"] = index_name + params["skipCount"] = skip_count + params["pageSize"] = page_size + if key_range is not None: + params["keyRange"] = key_range.to_json() + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.requestData", + "params": params, + } + json = yield cmd_dict + return ( + [DataEntry.from_json(i) for i in json["objectStoreDataEntries"]], + bool(json["hasMore"]), + )
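Since `request_data` is paginated via `skip_count`/`page_size` and signals continuation through `hasMore`, a caller typically loops until the flag drops. A hedged sketch, assuming a nodriver `Tab` object (`tab`) whose `send()` coroutine drives these generators; the storage key and page size are illustrative:

from nodriver import cdp


async def dump_object_store(tab, storage_key: str, db: str, store: str):
    await tab.send(cdp.indexed_db.enable())
    skip, page = 0, 50
    while True:
        entries, has_more = await tab.send(
            cdp.indexed_db.request_data(
                database_name=db,
                object_store_name=store,
                index_name="",       # empty string = object store data request
                skip_count=skip,
                page_size=page,
                storage_key=storage_key,
            )
        )
        for entry in entries:
            # DataEntry wraps runtime.RemoteObject values; .value is only set
            # for primitive objects.
            print(entry.primary_key.value, entry.value.value)
        if not has_more:
            break
        skip += page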
+ + + +
+[docs] +def get_metadata( + database_name: str, + object_store_name: str, + security_origin: typing.Optional[str] = None, + storage_key: typing.Optional[str] = None, + storage_bucket: typing.Optional[storage.StorageBucket] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[float, float]]: + """ + Gets metadata of an object store. + + :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin. + :param storage_key: *(Optional)* Storage key. + :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket. + :param database_name: Database name. + :param object_store_name: Object store name. + :returns: A tuple with the following items: + + 0. **entriesCount** - the entries count + 1. **keyGeneratorValue** - the current value of key generator, to become the next inserted key into the object store. Valid if objectStore.autoIncrement is true. + """ + params: T_JSON_DICT = dict() + if security_origin is not None: + params["securityOrigin"] = security_origin + if storage_key is not None: + params["storageKey"] = storage_key + if storage_bucket is not None: + params["storageBucket"] = storage_bucket.to_json() + params["databaseName"] = database_name + params["objectStoreName"] = object_store_name + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.getMetadata", + "params": params, + } + json = yield cmd_dict + return (float(json["entriesCount"]), float(json["keyGeneratorValue"]))
+ + + +
+[docs] +def request_database( + database_name: str, + security_origin: typing.Optional[str] = None, + storage_key: typing.Optional[str] = None, + storage_bucket: typing.Optional[storage.StorageBucket] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, DatabaseWithObjectStores]: + """ + Requests database with given name in given frame. + + :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin. + :param storage_key: *(Optional)* Storage key. + :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket. + :param database_name: Database name. + :returns: Database with an array of object stores. + """ + params: T_JSON_DICT = dict() + if security_origin is not None: + params["securityOrigin"] = security_origin + if storage_key is not None: + params["storageKey"] = storage_key + if storage_bucket is not None: + params["storageBucket"] = storage_bucket.to_json() + params["databaseName"] = database_name + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.requestDatabase", + "params": params, + } + json = yield cmd_dict + return DatabaseWithObjectStores.from_json(json["databaseWithObjectStores"])
+ + + +
+[docs] +def request_database_names( + security_origin: typing.Optional[str] = None, + storage_key: typing.Optional[str] = None, + storage_bucket: typing.Optional[storage.StorageBucket] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]]: + """ + Requests database names for given security origin. + + :param security_origin: *(Optional)* At least and at most one of securityOrigin, storageKey, or storageBucket must be specified. Security origin. + :param storage_key: *(Optional)* Storage key. + :param storage_bucket: *(Optional)* Storage bucket. If not specified, it uses the default bucket. + :returns: Database names for origin. + """ + params: T_JSON_DICT = dict() + if security_origin is not None: + params["securityOrigin"] = security_origin + if storage_key is not None: + params["storageKey"] = storage_key + if storage_bucket is not None: + params["storageBucket"] = storage_bucket.to_json() + cmd_dict: T_JSON_DICT = { + "method": "IndexedDB.requestDatabaseNames", + "params": params, + } + json = yield cmd_dict + return [str(i) for i in json["databaseNames"]]
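All of these wrappers share the same contract: the generator yields exactly one request dict and expects the raw CDP result dict to be sent back in, after which the parsed value is returned via `StopIteration`. A transport-free sketch of that contract, in the context of this module, using a simulated response payload:

gen = request_database_names(security_origin="https://example.com")
request = next(gen)
# request == {'method': 'IndexedDB.requestDatabaseNames',
#             'params': {'securityOrigin': 'https://example.com'}}
try:
    gen.send({"databaseNames": ["mydb", "cache"]})  # simulated response payload
except StopIteration as exc:
    assert exc.value == ["mydb", "cache"]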
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/input_.html b/docs/_build/html/_modules/nodriver/cdp/input_.html
new file mode 100644
index 0000000..d0892f1
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/input_.html
@@ -0,0 +1,1087 @@

Source code for nodriver.cdp.input_

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Input
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +@dataclass +class TouchPoint: + #: X coordinate of the event relative to the main frame's viewport in CSS pixels. + x: float + + #: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to + #: the top of the viewport and Y increases as it proceeds towards the bottom of the viewport. + y: float + + #: X radius of the touch area (default: 1.0). + radius_x: typing.Optional[float] = None + + #: Y radius of the touch area (default: 1.0). + radius_y: typing.Optional[float] = None + + #: Rotation angle (default: 0.0). + rotation_angle: typing.Optional[float] = None + + #: Force (default: 1.0). + force: typing.Optional[float] = None + + #: The normalized tangential pressure, which has a range of [-1,1] (default: 0). + tangential_pressure: typing.Optional[float] = None + + #: The plane angle between the Y-Z plane and the plane containing both the stylus axis and the Y axis, in degrees of the range [-90,90], a positive tiltX is to the right (default: 0) + tilt_x: typing.Optional[float] = None + + #: The plane angle between the X-Z plane and the plane containing both the stylus axis and the X axis, in degrees of the range [-90,90], a positive tiltY is towards the user (default: 0). + tilt_y: typing.Optional[float] = None + + #: The clockwise rotation of a pen stylus around its own major axis, in degrees in the range [0,359] (default: 0). + twist: typing.Optional[int] = None + + #: Identifier used to track touch sources between events, must be unique within an event. + id_: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["x"] = self.x + json["y"] = self.y + if self.radius_x is not None: + json["radiusX"] = self.radius_x + if self.radius_y is not None: + json["radiusY"] = self.radius_y + if self.rotation_angle is not None: + json["rotationAngle"] = self.rotation_angle + if self.force is not None: + json["force"] = self.force + if self.tangential_pressure is not None: + json["tangentialPressure"] = self.tangential_pressure + if self.tilt_x is not None: + json["tiltX"] = self.tilt_x + if self.tilt_y is not None: + json["tiltY"] = self.tilt_y + if self.twist is not None: + json["twist"] = self.twist + if self.id_ is not None: + json["id"] = self.id_ + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TouchPoint: + return cls( + x=float(json["x"]), + y=float(json["y"]), + radius_x=( + float(json["radiusX"]) + if json.get("radiusX", None) is not None + else None + ), + radius_y=( + float(json["radiusY"]) + if json.get("radiusY", None) is not None + else None + ), + rotation_angle=( + float(json["rotationAngle"]) + if json.get("rotationAngle", None) is not None + else None + ), + force=float(json["force"]) if json.get("force", None) is not None else None, + tangential_pressure=( + float(json["tangentialPressure"]) + if json.get("tangentialPressure", None) is not None + else None + ), + tilt_x=( + float(json["tiltX"]) if json.get("tiltX", None) is not None else None + ), + tilt_y=( + float(json["tiltY"]) if json.get("tiltY", None) is not None else None + ), + twist=int(json["twist"]) if json.get("twist", None) is not None else None, + id_=float(json["id"]) if json.get("id", None) is not None else None, + )
+ + + +
+[docs] +class GestureSourceType(enum.Enum): + DEFAULT = "default" + TOUCH = "touch" + MOUSE = "mouse" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> GestureSourceType: + return cls(json)
+ + + +
+[docs] +class MouseButton(enum.Enum): + NONE = "none" + LEFT = "left" + MIDDLE = "middle" + RIGHT = "right" + BACK = "back" + FORWARD = "forward" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> MouseButton: + return cls(json)
+ + + +
+[docs] +class TimeSinceEpoch(float): + """ + UTC time in seconds, counted from January 1, 1970. + """ + + def to_json(self) -> float: + return self + + @classmethod + def from_json(cls, json: float) -> TimeSinceEpoch: + return cls(json) + + def __repr__(self): + return "TimeSinceEpoch({})".format(super().__repr__())
+ + + +
+[docs]
+@dataclass
+class DragDataItem:
+    #: Mime type of the dragged data.
+    mime_type: str
+
+    #: Depending on the value of ``mimeType``, it contains the dragged link,
+    #: text, HTML markup or any other data.
+    data: str
+
+    #: Title associated with a link. Only valid when ``mimeType`` == "text/uri-list".
+    title: typing.Optional[str] = None
+
+    #: Stores the base URL for the contained markup. Only valid when ``mimeType``
+    #: == "text/html".
+    base_url: typing.Optional[str] = None
+
+    def to_json(self) -> T_JSON_DICT:
+        json: T_JSON_DICT = dict()
+        json["mimeType"] = self.mime_type
+        json["data"] = self.data
+        if self.title is not None:
+            json["title"] = self.title
+        if self.base_url is not None:
+            json["baseURL"] = self.base_url
+        return json
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> DragDataItem:
+        return cls(
+            mime_type=str(json["mimeType"]),
+            data=str(json["data"]),
+            title=str(json["title"]) if json.get("title", None) is not None else None,
+            base_url=(
+                str(json["baseURL"]) if json.get("baseURL", None) is not None else None
+            ),
+        )
+ + + +
+[docs] +@dataclass +class DragData: + items: typing.List[DragDataItem] + + #: Bit field representing allowed drag operations. Copy = 1, Link = 2, Move = 16 + drag_operations_mask: int + + #: List of filenames that should be included when dropping + files: typing.Optional[typing.List[str]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["items"] = [i.to_json() for i in self.items] + json["dragOperationsMask"] = self.drag_operations_mask + if self.files is not None: + json["files"] = [i for i in self.files] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DragData: + return cls( + items=[DragDataItem.from_json(i) for i in json["items"]], + drag_operations_mask=int(json["dragOperationsMask"]), + files=( + [str(i) for i in json["files"]] + if json.get("files", None) is not None + else None + ), + )
+ + + +
+[docs] +def dispatch_drag_event( + type_: str, + x: float, + y: float, + data: DragData, + modifiers: typing.Optional[int] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Dispatches a drag event into the page. + + **EXPERIMENTAL** + + :param type_: Type of the drag event. + :param x: X coordinate of the event relative to the main frame's viewport in CSS pixels. + :param y: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to the top of the viewport and Y increases as it proceeds towards the bottom of the viewport. + :param data: + :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0). + """ + params: T_JSON_DICT = dict() + params["type"] = type_ + params["x"] = x + params["y"] = y + params["data"] = data.to_json() + if modifiers is not None: + params["modifiers"] = modifiers + cmd_dict: T_JSON_DICT = { + "method": "Input.dispatchDragEvent", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs]
+def dispatch_key_event(
+    type_: str,
+    modifiers: typing.Optional[int] = None,
+    timestamp: typing.Optional[TimeSinceEpoch] = None,
+    text: typing.Optional[str] = None,
+    unmodified_text: typing.Optional[str] = None,
+    key_identifier: typing.Optional[str] = None,
+    code: typing.Optional[str] = None,
+    key: typing.Optional[str] = None,
+    windows_virtual_key_code: typing.Optional[int] = None,
+    native_virtual_key_code: typing.Optional[int] = None,
+    auto_repeat: typing.Optional[bool] = None,
+    is_keypad: typing.Optional[bool] = None,
+    is_system_key: typing.Optional[bool] = None,
+    location: typing.Optional[int] = None,
+    commands: typing.Optional[typing.List[str]] = None,
+) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
+    """
+    Dispatches a key event to the page.
+
+    :param type_: Type of the key event.
+    :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
+    :param timestamp: *(Optional)* Time at which the event occurred.
+    :param text: *(Optional)* Text as generated by processing a virtual key code with a keyboard layout. Not needed for ``keyUp`` and ``rawKeyDown`` events (default: "").
+    :param unmodified_text: *(Optional)* Text that would have been generated by the keyboard if no modifiers were pressed (except for shift). Useful for shortcut (accelerator) key handling (default: "").
+    :param key_identifier: *(Optional)* Unique key identifier (e.g., 'U+0041') (default: "").
+    :param code: *(Optional)* Unique DOM defined string value for each physical key (e.g., 'KeyA') (default: "").
+    :param key: *(Optional)* Unique DOM defined string value describing the meaning of the key in the context of active modifiers, keyboard layout, etc (e.g., 'AltGr') (default: "").
+    :param windows_virtual_key_code: *(Optional)* Windows virtual key code (default: 0).
+    :param native_virtual_key_code: *(Optional)* Native virtual key code (default: 0).
+    :param auto_repeat: *(Optional)* Whether the event was generated from auto repeat (default: false).
+    :param is_keypad: *(Optional)* Whether the event was generated from the keypad (default: false).
+    :param is_system_key: *(Optional)* Whether the event was a system key event (default: false).
+    :param location: *(Optional)* Whether the event was from the left or right side of the keyboard. 1=Left, 2=Right (default: 0).
+    :param commands: **(EXPERIMENTAL)** *(Optional)* Editing commands to send with the key event (e.g., 'selectAll') (default: []). These are related to but not equal to the command names used in ``document.execCommand`` and NSStandardKeyBindingResponding. See https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/core/editing/commands/editor_command_names.h for valid command names.
+ """ + params: T_JSON_DICT = dict() + params["type"] = type_ + if modifiers is not None: + params["modifiers"] = modifiers + if timestamp is not None: + params["timestamp"] = timestamp.to_json() + if text is not None: + params["text"] = text + if unmodified_text is not None: + params["unmodifiedText"] = unmodified_text + if key_identifier is not None: + params["keyIdentifier"] = key_identifier + if code is not None: + params["code"] = code + if key is not None: + params["key"] = key + if windows_virtual_key_code is not None: + params["windowsVirtualKeyCode"] = windows_virtual_key_code + if native_virtual_key_code is not None: + params["nativeVirtualKeyCode"] = native_virtual_key_code + if auto_repeat is not None: + params["autoRepeat"] = auto_repeat + if is_keypad is not None: + params["isKeypad"] = is_keypad + if is_system_key is not None: + params["isSystemKey"] = is_system_key + if location is not None: + params["location"] = location + if commands is not None: + params["commands"] = [i for i in commands] + cmd_dict: T_JSON_DICT = { + "method": "Input.dispatchKeyEvent", + "params": params, + } + json = yield cmd_dict
+ + + +
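A hedged usage sketch for the command above: press and release Enter, assuming a nodriver `Tab` object (`tab`) that exposes a `send()` coroutine. The keyDown/keyUp pairing, the `"\r"` text payload and virtual key code 13 follow common CDP practice rather than anything mandated by this module.

from nodriver import cdp


async def press_enter(tab):
    common = dict(
        key="Enter",
        code="Enter",
        windows_virtual_key_code=13,
        native_virtual_key_code=13,
    )
    # keyDown carries the text so the page receives an actual character.
    await tab.send(cdp.input_.dispatch_key_event(type_="keyDown", text="\r", **common))
    await tab.send(cdp.input_.dispatch_key_event(type_="keyUp", **common))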
+[docs] +def insert_text(text: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + This method emulates inserting text that doesn't come from a key press, + for example an emoji keyboard or an IME. + + **EXPERIMENTAL** + + :param text: The text to insert. + """ + params: T_JSON_DICT = dict() + params["text"] = text + cmd_dict: T_JSON_DICT = { + "method": "Input.insertText", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def ime_set_composition( + text: str, + selection_start: int, + selection_end: int, + replacement_start: typing.Optional[int] = None, + replacement_end: typing.Optional[int] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + This method sets the current candidate text for IME. + Use imeCommitComposition to commit the final text. + Use imeSetComposition with empty string as text to cancel composition. + + **EXPERIMENTAL** + + :param text: The text to insert + :param selection_start: selection start + :param selection_end: selection end + :param replacement_start: *(Optional)* replacement start + :param replacement_end: *(Optional)* replacement end + """ + params: T_JSON_DICT = dict() + params["text"] = text + params["selectionStart"] = selection_start + params["selectionEnd"] = selection_end + if replacement_start is not None: + params["replacementStart"] = replacement_start + if replacement_end is not None: + params["replacementEnd"] = replacement_end + cmd_dict: T_JSON_DICT = { + "method": "Input.imeSetComposition", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def dispatch_mouse_event( + type_: str, + x: float, + y: float, + modifiers: typing.Optional[int] = None, + timestamp: typing.Optional[TimeSinceEpoch] = None, + button: typing.Optional[MouseButton] = None, + buttons: typing.Optional[int] = None, + click_count: typing.Optional[int] = None, + force: typing.Optional[float] = None, + tangential_pressure: typing.Optional[float] = None, + tilt_x: typing.Optional[float] = None, + tilt_y: typing.Optional[float] = None, + twist: typing.Optional[int] = None, + delta_x: typing.Optional[float] = None, + delta_y: typing.Optional[float] = None, + pointer_type: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Dispatches a mouse event to the page. + + :param type_: Type of the mouse event. + :param x: X coordinate of the event relative to the main frame's viewport in CSS pixels. + :param y: Y coordinate of the event relative to the main frame's viewport in CSS pixels. 0 refers to the top of the viewport and Y increases as it proceeds towards the bottom of the viewport. + :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0). + :param timestamp: *(Optional)* Time at which the event occurred. + :param button: *(Optional)* Mouse button (default: "none"). + :param buttons: *(Optional)* A number indicating which buttons are pressed on the mouse when a mouse event is triggered. Left=1, Right=2, Middle=4, Back=8, Forward=16, None=0. + :param click_count: *(Optional)* Number of times the mouse button was clicked (default: 0). + :param force: **(EXPERIMENTAL)** *(Optional)* The normalized pressure, which has a range of [0,1] (default: 0). + :param tangential_pressure: **(EXPERIMENTAL)** *(Optional)* The normalized tangential pressure, which has a range of [-1,1] (default: 0). + :param tilt_x: *(Optional)* The plane angle between the Y-Z plane and the plane containing both the stylus axis and the Y axis, in degrees of the range [-90,90], a positive tiltX is to the right (default: 0). + :param tilt_y: *(Optional)* The plane angle between the X-Z plane and the plane containing both the stylus axis and the X axis, in degrees of the range [-90,90], a positive tiltY is towards the user (default: 0). + :param twist: **(EXPERIMENTAL)** *(Optional)* The clockwise rotation of a pen stylus around its own major axis, in degrees in the range [0,359] (default: 0). + :param delta_x: *(Optional)* X delta in CSS pixels for mouse wheel event (default: 0). + :param delta_y: *(Optional)* Y delta in CSS pixels for mouse wheel event (default: 0). + :param pointer_type: *(Optional)* Pointer type (default: "mouse"). 
+ """ + params: T_JSON_DICT = dict() + params["type"] = type_ + params["x"] = x + params["y"] = y + if modifiers is not None: + params["modifiers"] = modifiers + if timestamp is not None: + params["timestamp"] = timestamp.to_json() + if button is not None: + params["button"] = button.to_json() + if buttons is not None: + params["buttons"] = buttons + if click_count is not None: + params["clickCount"] = click_count + if force is not None: + params["force"] = force + if tangential_pressure is not None: + params["tangentialPressure"] = tangential_pressure + if tilt_x is not None: + params["tiltX"] = tilt_x + if tilt_y is not None: + params["tiltY"] = tilt_y + if twist is not None: + params["twist"] = twist + if delta_x is not None: + params["deltaX"] = delta_x + if delta_y is not None: + params["deltaY"] = delta_y + if pointer_type is not None: + params["pointerType"] = pointer_type + cmd_dict: T_JSON_DICT = { + "method": "Input.dispatchMouseEvent", + "params": params, + } + json = yield cmd_dict
+ + + +
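Per the semantics of `dispatch_mouse_event` above, a click is a `mousePressed`/`mouseReleased` pair with a matching button and click count; moving the pointer first keeps hover state consistent. A hedged sketch, again assuming a nodriver `Tab` (`tab`) with a `send()` coroutine:

from nodriver import cdp


async def click_at(tab, x: float, y: float):
    await tab.send(cdp.input_.dispatch_mouse_event(type_="mouseMoved", x=x, y=y))
    for event_type in ("mousePressed", "mouseReleased"):
        await tab.send(
            cdp.input_.dispatch_mouse_event(
                type_=event_type,
                x=x,
                y=y,
                button=cdp.input_.MouseButton.LEFT,
                click_count=1,
            )
        )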
+[docs]
+def dispatch_touch_event(
+    type_: str,
+    touch_points: typing.List[TouchPoint],
+    modifiers: typing.Optional[int] = None,
+    timestamp: typing.Optional[TimeSinceEpoch] = None,
+) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
+    """
+    Dispatches a touch event to the page.
+
+    :param type_: Type of the touch event. TouchEnd and TouchCancel must not contain any touch points, while TouchStart and TouchMove must contain at least one.
+    :param touch_points: Active touch points on the touch device. One event per changed point (compared to the previous touch event in a sequence) is generated, emulating pressing/moving/releasing points one by one.
+    :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0).
+    :param timestamp: *(Optional)* Time at which the event occurred.
+    """
+    params: T_JSON_DICT = dict()
+    params["type"] = type_
+    params["touchPoints"] = [i.to_json() for i in touch_points]
+    if modifiers is not None:
+        params["modifiers"] = modifiers
+    if timestamp is not None:
+        params["timestamp"] = timestamp.to_json()
+    cmd_dict: T_JSON_DICT = {
+        "method": "Input.dispatchTouchEvent",
+        "params": params,
+    }
+    json = yield cmd_dict
+ + + +
+[docs] +def cancel_dragging() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Cancels any active dragging in the page. + """ + cmd_dict: T_JSON_DICT = { + "method": "Input.cancelDragging", + } + json = yield cmd_dict
+ + + +
+[docs] +def emulate_touch_from_mouse_event( + type_: str, + x: int, + y: int, + button: MouseButton, + timestamp: typing.Optional[TimeSinceEpoch] = None, + delta_x: typing.Optional[float] = None, + delta_y: typing.Optional[float] = None, + modifiers: typing.Optional[int] = None, + click_count: typing.Optional[int] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Emulates touch event from the mouse event parameters. + + **EXPERIMENTAL** + + :param type_: Type of the mouse event. + :param x: X coordinate of the mouse pointer in DIP. + :param y: Y coordinate of the mouse pointer in DIP. + :param button: Mouse button. Only "none", "left", "right" are supported. + :param timestamp: *(Optional)* Time at which the event occurred (default: current time). + :param delta_x: *(Optional)* X delta in DIP for mouse wheel event (default: 0). + :param delta_y: *(Optional)* Y delta in DIP for mouse wheel event (default: 0). + :param modifiers: *(Optional)* Bit field representing pressed modifier keys. Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0). + :param click_count: *(Optional)* Number of times the mouse button was clicked (default: 0). + """ + params: T_JSON_DICT = dict() + params["type"] = type_ + params["x"] = x + params["y"] = y + params["button"] = button.to_json() + if timestamp is not None: + params["timestamp"] = timestamp.to_json() + if delta_x is not None: + params["deltaX"] = delta_x + if delta_y is not None: + params["deltaY"] = delta_y + if modifiers is not None: + params["modifiers"] = modifiers + if click_count is not None: + params["clickCount"] = click_count + cmd_dict: T_JSON_DICT = { + "method": "Input.emulateTouchFromMouseEvent", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_ignore_input_events( + ignore: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Ignores input events (useful while auditing page). + + :param ignore: Ignores input events processing when set to true. + """ + params: T_JSON_DICT = dict() + params["ignore"] = ignore + cmd_dict: T_JSON_DICT = { + "method": "Input.setIgnoreInputEvents", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_intercept_drags( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Prevents default drag and drop behavior and instead emits ``Input.dragIntercepted`` events. + Drag and drop behavior can be directly controlled via ``Input.dispatchDragEvent``. + + **EXPERIMENTAL** + + :param enabled: + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Input.setInterceptDrags", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def synthesize_pinch_gesture( + x: float, + y: float, + scale_factor: float, + relative_speed: typing.Optional[int] = None, + gesture_source_type: typing.Optional[GestureSourceType] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Synthesizes a pinch gesture over a time period by issuing appropriate touch events. + + **EXPERIMENTAL** + + :param x: X coordinate of the start of the gesture in CSS pixels. + :param y: Y coordinate of the start of the gesture in CSS pixels. + :param scale_factor: Relative scale factor after zooming (>1.0 zooms in, <1.0 zooms out). + :param relative_speed: *(Optional)* Relative pointer speed in pixels per second (default: 800). + :param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type). + """ + params: T_JSON_DICT = dict() + params["x"] = x + params["y"] = y + params["scaleFactor"] = scale_factor + if relative_speed is not None: + params["relativeSpeed"] = relative_speed + if gesture_source_type is not None: + params["gestureSourceType"] = gesture_source_type.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Input.synthesizePinchGesture", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def synthesize_scroll_gesture( + x: float, + y: float, + x_distance: typing.Optional[float] = None, + y_distance: typing.Optional[float] = None, + x_overscroll: typing.Optional[float] = None, + y_overscroll: typing.Optional[float] = None, + prevent_fling: typing.Optional[bool] = None, + speed: typing.Optional[int] = None, + gesture_source_type: typing.Optional[GestureSourceType] = None, + repeat_count: typing.Optional[int] = None, + repeat_delay_ms: typing.Optional[int] = None, + interaction_marker_name: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Synthesizes a scroll gesture over a time period by issuing appropriate touch events. + + **EXPERIMENTAL** + + :param x: X coordinate of the start of the gesture in CSS pixels. + :param y: Y coordinate of the start of the gesture in CSS pixels. + :param x_distance: *(Optional)* The distance to scroll along the X axis (positive to scroll left). + :param y_distance: *(Optional)* The distance to scroll along the Y axis (positive to scroll up). + :param x_overscroll: *(Optional)* The number of additional pixels to scroll back along the X axis, in addition to the given distance. + :param y_overscroll: *(Optional)* The number of additional pixels to scroll back along the Y axis, in addition to the given distance. + :param prevent_fling: *(Optional)* Prevent fling (default: true). + :param speed: *(Optional)* Swipe speed in pixels per second (default: 800). + :param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type). + :param repeat_count: *(Optional)* The number of times to repeat the gesture (default: 0). + :param repeat_delay_ms: *(Optional)* The number of milliseconds delay between each repeat. (default: 250). + :param interaction_marker_name: *(Optional)* The name of the interaction markers to generate, if not empty (default: ""). + """ + params: T_JSON_DICT = dict() + params["x"] = x + params["y"] = y + if x_distance is not None: + params["xDistance"] = x_distance + if y_distance is not None: + params["yDistance"] = y_distance + if x_overscroll is not None: + params["xOverscroll"] = x_overscroll + if y_overscroll is not None: + params["yOverscroll"] = y_overscroll + if prevent_fling is not None: + params["preventFling"] = prevent_fling + if speed is not None: + params["speed"] = speed + if gesture_source_type is not None: + params["gestureSourceType"] = gesture_source_type.to_json() + if repeat_count is not None: + params["repeatCount"] = repeat_count + if repeat_delay_ms is not None: + params["repeatDelayMs"] = repeat_delay_ms + if interaction_marker_name is not None: + params["interactionMarkerName"] = interaction_marker_name + cmd_dict: T_JSON_DICT = { + "method": "Input.synthesizeScrollGesture", + "params": params, + } + json = yield cmd_dict
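A hedged sketch of the scroll gesture above: scroll the content down roughly one screen. It assumes a nodriver `Tab` (`tab`) with a `send()` coroutine; note that, per the parameter description, a negative `y_distance` scrolls the content towards the bottom of the page.

from nodriver import cdp


async def scroll_down(tab, pixels: float = 800):
    await tab.send(
        cdp.input_.synthesize_scroll_gesture(
            x=100,
            y=100,               # gesture start point in CSS pixels
            y_distance=-pixels,  # negative = scroll content downwards
            speed=1200,
        )
    )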
+ + + +
+[docs] +def synthesize_tap_gesture( + x: float, + y: float, + duration: typing.Optional[int] = None, + tap_count: typing.Optional[int] = None, + gesture_source_type: typing.Optional[GestureSourceType] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Synthesizes a tap gesture over a time period by issuing appropriate touch events. + + **EXPERIMENTAL** + + :param x: X coordinate of the start of the gesture in CSS pixels. + :param y: Y coordinate of the start of the gesture in CSS pixels. + :param duration: *(Optional)* Duration between touchdown and touchup events in ms (default: 50). + :param tap_count: *(Optional)* Number of times to perform the tap (e.g. 2 for double tap, default: 1). + :param gesture_source_type: *(Optional)* Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type). + """ + params: T_JSON_DICT = dict() + params["x"] = x + params["y"] = y + if duration is not None: + params["duration"] = duration + if tap_count is not None: + params["tapCount"] = tap_count + if gesture_source_type is not None: + params["gestureSourceType"] = gesture_source_type.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Input.synthesizeTapGesture", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Input.dragIntercepted") +@dataclass +class DragIntercepted: + """ + **EXPERIMENTAL** + + Emitted only when ``Input.setInterceptDrags`` is enabled. Use this data with ``Input.dispatchDragEvent`` to + restore normal drag and drop behavior. + """ + + data: DragData + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DragIntercepted: + return cls(data=DragData.from_json(json["data"]))
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/inspector.html b/docs/_build/html/_modules/nodriver/cdp/inspector.html
new file mode 100644
index 0000000..048ddfe
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/inspector.html
@@ -0,0 +1,388 @@

Source code for nodriver.cdp.inspector

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Inspector (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables inspector domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Inspector.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables inspector domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Inspector.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Inspector.detached") +@dataclass +class Detached: + """ + Fired when remote debugging connection is about to be terminated. Contains detach reason. + """ + + #: The reason why connection has been terminated. + reason: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Detached: + return cls(reason=str(json["reason"]))
+ + + +
+[docs] +@event_class("Inspector.targetCrashed") +@dataclass +class TargetCrashed: + """ + Fired when debugging target has crashed + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TargetCrashed: + return cls()
+ + + +
+[docs] +@event_class("Inspector.targetReloadedAfterCrash") +@dataclass +class TargetReloadedAfterCrash: + """ + Fired when debugging target has reloaded after crash + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TargetReloadedAfterCrash: + return cls()
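These notifications are only emitted after `Inspector.enable`. A hedged sketch of wiring them up, assuming nodriver's `Tab.send()` / `Tab.add_handler(event_class, callback)` helpers (handler wiring may differ slightly between nodriver versions):

from nodriver import cdp


async def watch_target(tab):
    await tab.send(cdp.inspector.enable())  # events are only emitted once enabled
    tab.add_handler(
        cdp.inspector.Detached,
        lambda ev: print("debugger detached:", ev.reason),
    )
    tab.add_handler(
        cdp.inspector.TargetCrashed,
        lambda ev: print("debugging target crashed"),
    )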
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/io.html b/docs/_build/html/_modules/nodriver/cdp/io.html
new file mode 100644
index 0000000..96be479
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/io.html
@@ -0,0 +1,420 @@

Source code for nodriver.cdp.io

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: IO
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import runtime
+
+
+
+[docs] +class StreamHandle(str): + """ + This is either obtained from another method or specified as ``blob:<uuid>`` where + ``<uuid>`` is an UUID of a Blob. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> StreamHandle: + return cls(json) + + def __repr__(self): + return "StreamHandle({})".format(super().__repr__())
+ + + +
+[docs] +def close(handle: StreamHandle) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Close the stream, discard any temporary backing storage. + + :param handle: Handle of the stream to close. + """ + params: T_JSON_DICT = dict() + params["handle"] = handle.to_json() + cmd_dict: T_JSON_DICT = { + "method": "IO.close", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def read( + handle: StreamHandle, + offset: typing.Optional[int] = None, + size: typing.Optional[int] = None, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[typing.Optional[bool], str, bool] +]: + """ + Read a chunk of the stream + + :param handle: Handle of the stream to read. + :param offset: *(Optional)* Seek to the specified offset before reading (if not specified, proceed with offset following the last read). Some types of streams may only support sequential reads. + :param size: *(Optional)* Maximum number of bytes to read (left upon the agent discretion if not specified). + :returns: A tuple with the following items: + + 0. **base64Encoded** - *(Optional)* Set if the data is base64-encoded + 1. **data** - Data that were read. + 2. **eof** - Set if the end-of-file condition occurred while reading. + """ + params: T_JSON_DICT = dict() + params["handle"] = handle.to_json() + if offset is not None: + params["offset"] = offset + if size is not None: + params["size"] = size + cmd_dict: T_JSON_DICT = { + "method": "IO.read", + "params": params, + } + json = yield cmd_dict + return ( + ( + bool(json["base64Encoded"]) + if json.get("base64Encoded", None) is not None + else None + ), + str(json["data"]), + bool(json["eof"]), + )
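A hedged sketch of the usual read loop: drain a stream handle (for example the one returned by `Tracing.tracingComplete`) to a local file until `eof`, then close it. It assumes a nodriver `Tab` (`tab`) with a `send()` coroutine; binary streams arrive base64-encoded, text streams do not.

import base64

from nodriver import cdp


async def save_stream(tab, handle: cdp.io.StreamHandle, path: str):
    with open(path, "wb") as fh:
        while True:
            b64, data, eof = await tab.send(cdp.io.read(handle, size=65536))
            # Decode base64 chunks, pass text chunks through as UTF-8 bytes.
            fh.write(base64.b64decode(data) if b64 else data.encode())
            if eof:
                break
    await tab.send(cdp.io.close(handle))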
+ + + +
+[docs] +def resolve_blob( + object_id: runtime.RemoteObjectId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Return UUID of Blob object specified by a remote object id. + + :param object_id: Object id of a Blob object wrapper. + :returns: UUID of the specified Blob. + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "IO.resolveBlob", + "params": params, + } + json = yield cmd_dict + return str(json["uuid"])
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/layer_tree.html b/docs/_build/html/_modules/nodriver/cdp/layer_tree.html
new file mode 100644
index 0000000..da557ab
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/layer_tree.html
@@ -0,0 +1,881 @@

Source code for nodriver.cdp.layer_tree

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: LayerTree (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+
+
+
+[docs] +class LayerId(str): + """ + Unique Layer identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> LayerId: + return cls(json) + + def __repr__(self): + return "LayerId({})".format(super().__repr__())
+ + + +
+[docs] +class SnapshotId(str): + """ + Unique snapshot identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> SnapshotId: + return cls(json) + + def __repr__(self): + return "SnapshotId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class ScrollRect: + """ + Rectangle where scrolling happens on the main thread. + """ + + #: Rectangle itself. + rect: dom.Rect + + #: Reason for rectangle to force scrolling on the main thread + type_: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["rect"] = self.rect.to_json() + json["type"] = self.type_ + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScrollRect: + return cls( + rect=dom.Rect.from_json(json["rect"]), + type_=str(json["type"]), + )
+ + + +
+[docs] +@dataclass +class StickyPositionConstraint: + """ + Sticky position constraints. + """ + + #: Layout rectangle of the sticky element before being shifted + sticky_box_rect: dom.Rect + + #: Layout rectangle of the containing block of the sticky element + containing_block_rect: dom.Rect + + #: The nearest sticky layer that shifts the sticky box + nearest_layer_shifting_sticky_box: typing.Optional[LayerId] = None + + #: The nearest sticky layer that shifts the containing block + nearest_layer_shifting_containing_block: typing.Optional[LayerId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["stickyBoxRect"] = self.sticky_box_rect.to_json() + json["containingBlockRect"] = self.containing_block_rect.to_json() + if self.nearest_layer_shifting_sticky_box is not None: + json["nearestLayerShiftingStickyBox"] = ( + self.nearest_layer_shifting_sticky_box.to_json() + ) + if self.nearest_layer_shifting_containing_block is not None: + json["nearestLayerShiftingContainingBlock"] = ( + self.nearest_layer_shifting_containing_block.to_json() + ) + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StickyPositionConstraint: + return cls( + sticky_box_rect=dom.Rect.from_json(json["stickyBoxRect"]), + containing_block_rect=dom.Rect.from_json(json["containingBlockRect"]), + nearest_layer_shifting_sticky_box=( + LayerId.from_json(json["nearestLayerShiftingStickyBox"]) + if json.get("nearestLayerShiftingStickyBox", None) is not None + else None + ), + nearest_layer_shifting_containing_block=( + LayerId.from_json(json["nearestLayerShiftingContainingBlock"]) + if json.get("nearestLayerShiftingContainingBlock", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class PictureTile: + """ + Serialized fragment of layer picture along with its offset within the layer. + """ + + #: Offset from owning layer left boundary + x: float + + #: Offset from owning layer top boundary + y: float + + #: Base64-encoded snapshot data. (Encoded as a base64 string when passed over JSON) + picture: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["x"] = self.x + json["y"] = self.y + json["picture"] = self.picture + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PictureTile: + return cls( + x=float(json["x"]), + y=float(json["y"]), + picture=str(json["picture"]), + )
+ + + +
+[docs] +@dataclass +class Layer: + """ + Information about a compositing layer. + """ + + #: The unique id for this layer. + layer_id: LayerId + + #: Offset from parent layer, X coordinate. + offset_x: float + + #: Offset from parent layer, Y coordinate. + offset_y: float + + #: Layer width. + width: float + + #: Layer height. + height: float + + #: Indicates how many time this layer has painted. + paint_count: int + + #: Indicates whether this layer hosts any content, rather than being used for + #: transform/scrolling purposes only. + draws_content: bool + + #: The id of parent (not present for root). + parent_layer_id: typing.Optional[LayerId] = None + + #: The backend id for the node associated with this layer. + backend_node_id: typing.Optional[dom.BackendNodeId] = None + + #: Transformation matrix for layer, default is identity matrix + transform: typing.Optional[typing.List[float]] = None + + #: Transform anchor point X, absent if no transform specified + anchor_x: typing.Optional[float] = None + + #: Transform anchor point Y, absent if no transform specified + anchor_y: typing.Optional[float] = None + + #: Transform anchor point Z, absent if no transform specified + anchor_z: typing.Optional[float] = None + + #: Set if layer is not visible. + invisible: typing.Optional[bool] = None + + #: Rectangles scrolling on main thread only. + scroll_rects: typing.Optional[typing.List[ScrollRect]] = None + + #: Sticky position constraint information + sticky_position_constraint: typing.Optional[StickyPositionConstraint] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["layerId"] = self.layer_id.to_json() + json["offsetX"] = self.offset_x + json["offsetY"] = self.offset_y + json["width"] = self.width + json["height"] = self.height + json["paintCount"] = self.paint_count + json["drawsContent"] = self.draws_content + if self.parent_layer_id is not None: + json["parentLayerId"] = self.parent_layer_id.to_json() + if self.backend_node_id is not None: + json["backendNodeId"] = self.backend_node_id.to_json() + if self.transform is not None: + json["transform"] = [i for i in self.transform] + if self.anchor_x is not None: + json["anchorX"] = self.anchor_x + if self.anchor_y is not None: + json["anchorY"] = self.anchor_y + if self.anchor_z is not None: + json["anchorZ"] = self.anchor_z + if self.invisible is not None: + json["invisible"] = self.invisible + if self.scroll_rects is not None: + json["scrollRects"] = [i.to_json() for i in self.scroll_rects] + if self.sticky_position_constraint is not None: + json["stickyPositionConstraint"] = self.sticky_position_constraint.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Layer: + return cls( + layer_id=LayerId.from_json(json["layerId"]), + offset_x=float(json["offsetX"]), + offset_y=float(json["offsetY"]), + width=float(json["width"]), + height=float(json["height"]), + paint_count=int(json["paintCount"]), + draws_content=bool(json["drawsContent"]), + parent_layer_id=( + LayerId.from_json(json["parentLayerId"]) + if json.get("parentLayerId", None) is not None + else None + ), + backend_node_id=( + dom.BackendNodeId.from_json(json["backendNodeId"]) + if json.get("backendNodeId", None) is not None + else None + ), + transform=( + [float(i) for i in json["transform"]] + if json.get("transform", None) is not None + else None + ), + anchor_x=( + float(json["anchorX"]) + if json.get("anchorX", None) is not None + else None + ), + anchor_y=( + float(json["anchorY"]) + if json.get("anchorY", None) 
is not None + else None + ), + anchor_z=( + float(json["anchorZ"]) + if json.get("anchorZ", None) is not None + else None + ), + invisible=( + bool(json["invisible"]) + if json.get("invisible", None) is not None + else None + ), + scroll_rects=( + [ScrollRect.from_json(i) for i in json["scrollRects"]] + if json.get("scrollRects", None) is not None + else None + ), + sticky_position_constraint=( + StickyPositionConstraint.from_json(json["stickyPositionConstraint"]) + if json.get("stickyPositionConstraint", None) is not None + else None + ), + )
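``Layer`` objects arrive as a flat list and only carry a ``parent_layer_id`` back-reference, so any tree view has to be rebuilt on the client. A minimal plain-Python sketch using only the fields defined above (the ``nodriver.cdp.layer_tree`` import path is assumed from the file being added in this patch):

    import typing

    from nodriver.cdp.layer_tree import Layer, LayerId

    def index_by_parent(
        layers: typing.List[Layer],
    ) -> typing.Dict[typing.Optional[LayerId], typing.List[Layer]]:
        # Group layers under their parent id; root layers end up under the None key.
        children: typing.Dict[typing.Optional[LayerId], typing.List[Layer]] = {}
        for layer in layers:
            children.setdefault(layer.parent_layer_id, []).append(layer)
        return children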
+ + + +
+[docs] +class PaintProfile(list): + """ + Array of timings, one per paint step. + """ + + def to_json(self) -> typing.List[float]: + return self + + @classmethod + def from_json(cls, json: typing.List[float]) -> PaintProfile: + return cls(json) + + def __repr__(self): + return "PaintProfile({})".format(super().__repr__())
+ + + +
+[docs] +def compositing_reasons( + layer_id: LayerId, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[typing.List[str], typing.List[str]] +]: + """ + Provides the reasons why the given layer was composited. + + :param layer_id: The id of the layer for which we want to get the reasons it was composited. + :returns: A tuple with the following items: + + 0. **compositingReasons** - A list of strings specifying reasons for the given layer to become composited. + 1. **compositingReasonIds** - A list of strings specifying reason IDs for the given layer to become composited. + """ + params: T_JSON_DICT = dict() + params["layerId"] = layer_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.compositingReasons", + "params": params, + } + json = yield cmd_dict + return ( + [str(i) for i in json["compositingReasons"]], + [str(i) for i in json["compositingReasonIds"]], + )
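``compositing_reasons()`` (like every command wrapper in these generated modules) is a generator that yields one CDP message and returns the parsed result, so it has to be driven by a connection. A hedged sketch, assuming the usual nodriver pattern where a tab/connection object (``tab`` below, obtained elsewhere from a started browser) executes these wrappers via ``tab.send()``:

    from nodriver import cdp

    async def explain_compositing(tab, layer_id: cdp.layer_tree.LayerId) -> None:
        # The command resolves to a (compositingReasons, compositingReasonIds) tuple.
        reasons, reason_ids = await tab.send(
            cdp.layer_tree.compositing_reasons(layer_id)
        )
        for reason_id, reason in zip(reason_ids, reasons):
            print(f"{reason_id}: {reason}")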
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables compositing tree inspection. + """ + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables compositing tree inspection. + """ + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def load_snapshot( + tiles: typing.List[PictureTile], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SnapshotId]: + """ + Returns the snapshot identifier. + + :param tiles: An array of tiles composing the snapshot. + :returns: The id of the snapshot. + """ + params: T_JSON_DICT = dict() + params["tiles"] = [i.to_json() for i in tiles] + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.loadSnapshot", + "params": params, + } + json = yield cmd_dict + return SnapshotId.from_json(json["snapshotId"])
+ + + +
+[docs] +def make_snapshot( + layer_id: LayerId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SnapshotId]: + """ + Returns the layer snapshot identifier. + + :param layer_id: The id of the layer. + :returns: The id of the layer snapshot. + """ + params: T_JSON_DICT = dict() + params["layerId"] = layer_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.makeSnapshot", + "params": params, + } + json = yield cmd_dict + return SnapshotId.from_json(json["snapshotId"])
+ + + +
+[docs] +def profile_snapshot( + snapshot_id: SnapshotId, + min_repeat_count: typing.Optional[int] = None, + min_duration: typing.Optional[float] = None, + clip_rect: typing.Optional[dom.Rect] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[PaintProfile]]: + """ + :param snapshot_id: The id of the layer snapshot. + :param min_repeat_count: *(Optional)* The maximum number of times to replay the snapshot (1, if not specified). + :param min_duration: *(Optional)* The minimum duration (in seconds) to replay the snapshot. + :param clip_rect: *(Optional)* The clip rectangle to apply when replaying the snapshot. + :returns: The array of paint profiles, one per run. + """ + params: T_JSON_DICT = dict() + params["snapshotId"] = snapshot_id.to_json() + if min_repeat_count is not None: + params["minRepeatCount"] = min_repeat_count + if min_duration is not None: + params["minDuration"] = min_duration + if clip_rect is not None: + params["clipRect"] = clip_rect.to_json() + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.profileSnapshot", + "params": params, + } + json = yield cmd_dict + return [PaintProfile.from_json(i) for i in json["timings"]]
+ + + +
+[docs] +def release_snapshot( + snapshot_id: SnapshotId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Releases layer snapshot captured by the back-end. + + :param snapshot_id: The id of the layer snapshot. + """ + params: T_JSON_DICT = dict() + params["snapshotId"] = snapshot_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.releaseSnapshot", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def replay_snapshot( + snapshot_id: SnapshotId, + from_step: typing.Optional[int] = None, + to_step: typing.Optional[int] = None, + scale: typing.Optional[float] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Replays the layer snapshot and returns the resulting bitmap. + + :param snapshot_id: The id of the layer snapshot. + :param from_step: *(Optional)* The first step to replay from (replay from the very start if not specified). + :param to_step: *(Optional)* The last step to replay to (replay till the end if not specified). + :param scale: *(Optional)* The scale to apply while replaying (defaults to 1). + :returns: A data: URL for resulting image. + """ + params: T_JSON_DICT = dict() + params["snapshotId"] = snapshot_id.to_json() + if from_step is not None: + params["fromStep"] = from_step + if to_step is not None: + params["toStep"] = to_step + if scale is not None: + params["scale"] = scale + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.replaySnapshot", + "params": params, + } + json = yield cmd_dict + return str(json["dataURL"])
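The snapshot commands above compose into a small workflow: capture a layer, optionally profile it, rasterize it, and release it. A sketch under the same assumed ``tab.send()`` pattern as before; the base64 ``data:`` URL handling is an assumption about the returned string format, not something this module guarantees:

    import base64

    from nodriver import cdp

    async def rasterize_layer(tab, layer_id: cdp.layer_tree.LayerId) -> bytes:
        snapshot_id = await tab.send(cdp.layer_tree.make_snapshot(layer_id))
        timings = await tab.send(
            cdp.layer_tree.profile_snapshot(snapshot_id, min_repeat_count=1)
        )
        print("paint steps in first profiled run:", len(timings[0]) if timings else 0)
        data_url = await tab.send(
            cdp.layer_tree.replay_snapshot(snapshot_id, scale=1.0)
        )
        await tab.send(cdp.layer_tree.release_snapshot(snapshot_id))
        # Assumes the usual "data:image/png;base64,..." shape of the returned URL.
        return base64.b64decode(data_url.split(",", 1)[1])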
+ + + +
+[docs] +def snapshot_command_log( + snapshot_id: SnapshotId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[dict]]: + """ + Replays the layer snapshot and returns canvas log. + + :param snapshot_id: The id of the layer snapshot. + :returns: The array of canvas function calls. + """ + params: T_JSON_DICT = dict() + params["snapshotId"] = snapshot_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "LayerTree.snapshotCommandLog", + "params": params, + } + json = yield cmd_dict + return [dict(i) for i in json["commandLog"]]
+ + + +
+[docs] +@event_class("LayerTree.layerPainted") +@dataclass +class LayerPainted: + #: The id of the painted layer. + layer_id: LayerId + #: Clip rectangle. + clip: dom.Rect + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LayerPainted: + return cls( + layer_id=LayerId.from_json(json["layerId"]), + clip=dom.Rect.from_json(json["clip"]), + )
+ + + +
+[docs] +@event_class("LayerTree.layerTreeDidChange") +@dataclass +class LayerTreeDidChange: + #: Layer tree, absent if not in the compositing mode. + layers: typing.Optional[typing.List[Layer]] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LayerTreeDidChange: + return cls( + layers=( + [Layer.from_json(i) for i in json["layers"]] + if json.get("layers", None) is not None + else None + ) + )
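The two event dataclasses above are only delivered once the domain is enabled. A hedged sketch of wiring them up, assuming nodriver's ``tab.add_handler()`` accepts an event class plus a one-argument callback (that registration API is not part of this generated module):

    from nodriver import cdp

    async def watch_layer_tree(tab) -> None:
        def on_change(event: cdp.layer_tree.LayerTreeDidChange) -> None:
            count = len(event.layers) if event.layers is not None else 0
            print("layer tree changed; layers:", count)

        def on_paint(event: cdp.layer_tree.LayerPainted) -> None:
            print("layer painted:", event.layer_id, "clip:", event.clip)

        tab.add_handler(cdp.layer_tree.LayerTreeDidChange, on_change)
        tab.add_handler(cdp.layer_tree.LayerPainted, on_paint)
        await tab.send(cdp.layer_tree.enable())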
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/log.html b/docs/_build/html/_modules/nodriver/cdp/log.html
new file mode 100644
index 0000000..b4b4332
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/log.html
@@ -0,0 +1,541 @@
+ nodriver.cdp.log - nodriver documentation

Source code for nodriver.cdp.log

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Log
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import network
+from . import runtime
+
+
+
+[docs] +@dataclass +class LogEntry: + """ + Log entry. + """ + + #: Log entry source. + source: str + + #: Log entry severity. + level: str + + #: Logged text. + text: str + + #: Timestamp when this entry was added. + timestamp: runtime.Timestamp + + category: typing.Optional[str] = None + + #: URL of the resource if known. + url: typing.Optional[str] = None + + #: Line number in the resource. + line_number: typing.Optional[int] = None + + #: JavaScript stack trace. + stack_trace: typing.Optional[runtime.StackTrace] = None + + #: Identifier of the network request associated with this entry. + network_request_id: typing.Optional[network.RequestId] = None + + #: Identifier of the worker associated with this entry. + worker_id: typing.Optional[str] = None + + #: Call arguments. + args: typing.Optional[typing.List[runtime.RemoteObject]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["source"] = self.source + json["level"] = self.level + json["text"] = self.text + json["timestamp"] = self.timestamp.to_json() + if self.category is not None: + json["category"] = self.category + if self.url is not None: + json["url"] = self.url + if self.line_number is not None: + json["lineNumber"] = self.line_number + if self.stack_trace is not None: + json["stackTrace"] = self.stack_trace.to_json() + if self.network_request_id is not None: + json["networkRequestId"] = self.network_request_id.to_json() + if self.worker_id is not None: + json["workerId"] = self.worker_id + if self.args is not None: + json["args"] = [i.to_json() for i in self.args] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LogEntry: + return cls( + source=str(json["source"]), + level=str(json["level"]), + text=str(json["text"]), + timestamp=runtime.Timestamp.from_json(json["timestamp"]), + category=( + str(json["category"]) + if json.get("category", None) is not None + else None + ), + url=str(json["url"]) if json.get("url", None) is not None else None, + line_number=( + int(json["lineNumber"]) + if json.get("lineNumber", None) is not None + else None + ), + stack_trace=( + runtime.StackTrace.from_json(json["stackTrace"]) + if json.get("stackTrace", None) is not None + else None + ), + network_request_id=( + network.RequestId.from_json(json["networkRequestId"]) + if json.get("networkRequestId", None) is not None + else None + ), + worker_id=( + str(json["workerId"]) + if json.get("workerId", None) is not None + else None + ), + args=( + [runtime.RemoteObject.from_json(i) for i in json["args"]] + if json.get("args", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ViolationSetting: + """ + Violation configuration setting. + """ + + #: Violation type. + name: str + + #: Time threshold to trigger upon. + threshold: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["threshold"] = self.threshold + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ViolationSetting: + return cls( + name=str(json["name"]), + threshold=float(json["threshold"]), + )
+ + + +
+[docs] +def clear() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears the log. + """ + cmd_dict: T_JSON_DICT = { + "method": "Log.clear", + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables log domain, prevents further log entries from being reported to the client. + """ + cmd_dict: T_JSON_DICT = { + "method": "Log.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables log domain, sends the entries collected so far to the client by means of the + ``entryAdded`` notification. + """ + cmd_dict: T_JSON_DICT = { + "method": "Log.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def start_violations_report( + config: typing.List[ViolationSetting], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + start violation reporting. + + :param config: Configuration for violations. + """ + params: T_JSON_DICT = dict() + params["config"] = [i.to_json() for i in config] + cmd_dict: T_JSON_DICT = { + "method": "Log.startViolationsReport", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_violations_report() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Stop violation reporting. + """ + cmd_dict: T_JSON_DICT = { + "method": "Log.stopViolationsReport", + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Log.entryAdded") +@dataclass +class EntryAdded: + """ + Issued when new message was logged. + """ + + #: The entry. + entry: LogEntry + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> EntryAdded: + return cls(entry=LogEntry.from_json(json["entry"]))
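A hedged usage sketch for this domain, under the same assumed nodriver ``add_handler()``/``send()`` pattern: subscribe to ``EntryAdded`` first, then call ``enable()`` so both buffered and new entries arrive. The ``longTask`` violation name and 200 ms threshold are illustrative values, not something defined in this module:

    from nodriver import cdp

    async def stream_browser_log(tab) -> None:
        def on_entry(event: cdp.log.EntryAdded) -> None:
            entry = event.entry
            print(f"[{entry.level}] {entry.source}: {entry.text}")
            if entry.url is not None:
                print("    at", entry.url, "line", entry.line_number)

        tab.add_handler(cdp.log.EntryAdded, on_entry)
        await tab.send(cdp.log.enable())
        # Optionally also report violations, e.g. tasks longer than ~200 ms.
        await tab.send(
            cdp.log.start_violations_report(
                [cdp.log.ViolationSetting(name="longTask", threshold=200)]
            )
        )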
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/media.html b/docs/_build/html/_modules/nodriver/cdp/media.html
new file mode 100644
index 0000000..ce50014
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/media.html
@@ -0,0 +1,643 @@
+ nodriver.cdp.media - nodriver documentation

Source code for nodriver.cdp.media

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Media (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +class PlayerId(str): + """ + Players will get an ID that is unique within the agent context. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> PlayerId: + return cls(json) + + def __repr__(self): + return "PlayerId({})".format(super().__repr__())
+ + + +
+[docs] +class Timestamp(float): + def to_json(self) -> float: + return self + + @classmethod + def from_json(cls, json: float) -> Timestamp: + return cls(json) + + def __repr__(self): + return "Timestamp({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class PlayerMessage: + """ + Have one type per entry in MediaLogRecord::Type + Corresponds to kMessage + """ + + #: Keep in sync with MediaLogMessageLevel + #: We are currently keeping the message level 'error' separate from the + #: PlayerError type because right now they represent different things, + #: this one being a DVLOG(ERROR) style log message that gets printed + #: based on what log level is selected in the UI, and the other is a + #: representation of a media::PipelineStatus object. Soon however we're + #: going to be moving away from using PipelineStatus for errors and + #: introducing a new error type which should hopefully let us integrate + #: the error log level into the PlayerError type. + level: str + + message: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["level"] = self.level + json["message"] = self.message + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerMessage: + return cls( + level=str(json["level"]), + message=str(json["message"]), + )
+ + + +
+[docs] +@dataclass +class PlayerProperty: + """ + Corresponds to kMediaPropertyChange + """ + + name: str + + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerProperty: + return cls( + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@dataclass +class PlayerEvent: + """ + Corresponds to kMediaEventTriggered + """ + + timestamp: Timestamp + + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["timestamp"] = self.timestamp.to_json() + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerEvent: + return cls( + timestamp=Timestamp.from_json(json["timestamp"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@dataclass +class PlayerErrorSourceLocation: + """ + Represents logged source line numbers reported in an error. + NOTE: file and line are from chromium c++ implementation code, not js. + """ + + file: str + + line: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["file"] = self.file + json["line"] = self.line + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerErrorSourceLocation: + return cls( + file=str(json["file"]), + line=int(json["line"]), + )
+ + + +
+[docs] +@dataclass +class PlayerError: + """ + Corresponds to kMediaError + """ + + error_type: str + + #: Code is the numeric enum entry for a specific set of error codes, such + #: as PipelineStatusCodes in media/base/pipeline_status.h + code: int + + #: A trace of where this error was caused / where it passed through. + stack: typing.List[PlayerErrorSourceLocation] + + #: Errors potentially have a root cause error, ie, a DecoderError might be + #: caused by an WindowsError + cause: typing.List[PlayerError] + + #: Extra data attached to an error, such as an HRESULT, Video Codec, etc. + data: dict + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["errorType"] = self.error_type + json["code"] = self.code + json["stack"] = [i.to_json() for i in self.stack] + json["cause"] = [i.to_json() for i in self.cause] + json["data"] = self.data + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerError: + return cls( + error_type=str(json["errorType"]), + code=int(json["code"]), + stack=[PlayerErrorSourceLocation.from_json(i) for i in json["stack"]], + cause=[PlayerError.from_json(i) for i in json["cause"]], + data=dict(json["data"]), + )
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables the Media domain + """ + cmd_dict: T_JSON_DICT = { + "method": "Media.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables the Media domain. + """ + cmd_dict: T_JSON_DICT = { + "method": "Media.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Media.playerPropertiesChanged") +@dataclass +class PlayerPropertiesChanged: + """ + This can be called multiple times, and can be used to set / override / + remove player properties. A null propValue indicates removal. + """ + + player_id: PlayerId + properties: typing.List[PlayerProperty] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerPropertiesChanged: + return cls( + player_id=PlayerId.from_json(json["playerId"]), + properties=[PlayerProperty.from_json(i) for i in json["properties"]], + )
+ + + +
+[docs] +@event_class("Media.playerEventsAdded") +@dataclass +class PlayerEventsAdded: + """ + Send events as a list, allowing them to be batched on the browser for less + congestion. If batched, events must ALWAYS be in chronological order. + """ + + player_id: PlayerId + events: typing.List[PlayerEvent] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerEventsAdded: + return cls( + player_id=PlayerId.from_json(json["playerId"]), + events=[PlayerEvent.from_json(i) for i in json["events"]], + )
+ + + +
+[docs] +@event_class("Media.playerMessagesLogged") +@dataclass +class PlayerMessagesLogged: + """ + Send a list of any messages that need to be delivered. + """ + + player_id: PlayerId + messages: typing.List[PlayerMessage] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerMessagesLogged: + return cls( + player_id=PlayerId.from_json(json["playerId"]), + messages=[PlayerMessage.from_json(i) for i in json["messages"]], + )
+ + + +
+[docs] +@event_class("Media.playerErrorsRaised") +@dataclass +class PlayerErrorsRaised: + """ + Send a list of any errors that need to be delivered. + """ + + player_id: PlayerId + errors: typing.List[PlayerError] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayerErrorsRaised: + return cls( + player_id=PlayerId.from_json(json["playerId"]), + errors=[PlayerError.from_json(i) for i in json["errors"]], + )
+ + + +
+[docs] +@event_class("Media.playersCreated") +@dataclass +class PlayersCreated: + """ + Called whenever a player is created, or when a new agent joins and receives + a list of active players. If an agent is restored, it will receive the full + list of player ids and all events again. + """ + + players: typing.List[PlayerId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PlayersCreated: + return cls(players=[PlayerId.from_json(i) for i in json["players"]])
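Since every Media notification is delivered as one of the event dataclasses above, a monitor only needs handlers plus ``enable()``. A hedged sketch under the same assumed nodriver pattern:

    from nodriver import cdp

    async def watch_media_players(tab) -> None:
        def on_players(event: cdp.media.PlayersCreated) -> None:
            print("players:", [str(p) for p in event.players])

        def on_messages(event: cdp.media.PlayerMessagesLogged) -> None:
            for msg in event.messages:
                print(f"{event.player_id} [{msg.level}] {msg.message}")

        def on_errors(event: cdp.media.PlayerErrorsRaised) -> None:
            for err in event.errors:
                print(f"{event.player_id} error {err.error_type} code={err.code}")

        tab.add_handler(cdp.media.PlayersCreated, on_players)
        tab.add_handler(cdp.media.PlayerMessagesLogged, on_messages)
        tab.add_handler(cdp.media.PlayerErrorsRaised, on_errors)
        await tab.send(cdp.media.enable())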
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/memory.html b/docs/_build/html/_modules/nodriver/cdp/memory.html
new file mode 100644
index 0000000..37bad6e
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/memory.html
@@ -0,0 +1,617 @@
+ nodriver.cdp.memory - nodriver documentation

Source code for nodriver.cdp.memory

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Memory (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +class PressureLevel(enum.Enum): + """ + Memory pressure level. + """ + + MODERATE = "moderate" + CRITICAL = "critical" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PressureLevel: + return cls(json)
+ + + +
+[docs] +@dataclass +class SamplingProfileNode: + """ + Heap profile sample. + """ + + #: Size of the sampled allocation. + size: float + + #: Total bytes attributed to this sample. + total: float + + #: Execution stack at the point of allocation. + stack: typing.List[str] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["size"] = self.size + json["total"] = self.total + json["stack"] = [i for i in self.stack] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SamplingProfileNode: + return cls( + size=float(json["size"]), + total=float(json["total"]), + stack=[str(i) for i in json["stack"]], + )
+ + + +
+[docs] +@dataclass +class SamplingProfile: + """ + Array of heap profile samples. + """ + + samples: typing.List[SamplingProfileNode] + + modules: typing.List[Module] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["samples"] = [i.to_json() for i in self.samples] + json["modules"] = [i.to_json() for i in self.modules] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SamplingProfile: + return cls( + samples=[SamplingProfileNode.from_json(i) for i in json["samples"]], + modules=[Module.from_json(i) for i in json["modules"]], + )
+ + + +
+[docs] +@dataclass +class Module: + """ + Executable module information + """ + + #: Name of the module. + name: str + + #: UUID of the module. + uuid: str + + #: Base address where the module is loaded into memory. Encoded as a decimal + #: or hexadecimal (0x prefixed) string. + base_address: str + + #: Size of the module in bytes. + size: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["uuid"] = self.uuid + json["baseAddress"] = self.base_address + json["size"] = self.size + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Module: + return cls( + name=str(json["name"]), + uuid=str(json["uuid"]), + base_address=str(json["baseAddress"]), + size=float(json["size"]), + )
+ + + +
+[docs] +def get_dom_counters() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[int, int, int]] +): + """ + + + :returns: A tuple with the following items: + + 0. **documents** - + 1. **nodes** - + 2. **jsEventListeners** - + """ + cmd_dict: T_JSON_DICT = { + "method": "Memory.getDOMCounters", + } + json = yield cmd_dict + return (int(json["documents"]), int(json["nodes"]), int(json["jsEventListeners"]))
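``get_dom_counters()`` returns a plain tuple rather than a dataclass, so the three counters unpack positionally. A hedged sketch via the assumed ``tab.send()`` pattern:

    from nodriver import cdp

    async def print_dom_counters(tab) -> None:
        # Order matches the docstring: documents, nodes, jsEventListeners.
        documents, nodes, listeners = await tab.send(cdp.memory.get_dom_counters())
        print(f"documents={documents} nodes={nodes} jsEventListeners={listeners}")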
+ + + +
+[docs] +def prepare_for_leak_detection() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "Memory.prepareForLeakDetection", + } + json = yield cmd_dict
+ + + +
+[docs] +def forcibly_purge_java_script_memory() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, None] +): + """ + Simulate OomIntervention by purging V8 memory. + """ + cmd_dict: T_JSON_DICT = { + "method": "Memory.forciblyPurgeJavaScriptMemory", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_pressure_notifications_suppressed( + suppressed: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable/disable suppressing memory pressure notifications in all processes. + + :param suppressed: If true, memory pressure notifications will be suppressed. + """ + params: T_JSON_DICT = dict() + params["suppressed"] = suppressed + cmd_dict: T_JSON_DICT = { + "method": "Memory.setPressureNotificationsSuppressed", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def simulate_pressure_notification( + level: PressureLevel, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Simulate a memory pressure notification in all processes. + + :param level: Memory pressure level of the notification. + """ + params: T_JSON_DICT = dict() + params["level"] = level.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Memory.simulatePressureNotification", + "params": params, + } + json = yield cmd_dict
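A hedged sketch combining the two pressure helpers above: suppress real notifications first so only the injected one is observed (same assumed ``tab.send()`` driver as in the earlier notes):

    from nodriver import cdp

    async def inject_critical_pressure(tab) -> None:
        await tab.send(cdp.memory.set_pressure_notifications_suppressed(True))
        await tab.send(
            cdp.memory.simulate_pressure_notification(cdp.memory.PressureLevel.CRITICAL)
        )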
+ + + +
+[docs] +def start_sampling( + sampling_interval: typing.Optional[int] = None, + suppress_randomness: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Start collecting native memory profile. + + :param sampling_interval: *(Optional)* Average number of bytes between samples. + :param suppress_randomness: *(Optional)* Do not randomize intervals between samples. + """ + params: T_JSON_DICT = dict() + if sampling_interval is not None: + params["samplingInterval"] = sampling_interval + if suppress_randomness is not None: + params["suppressRandomness"] = suppress_randomness + cmd_dict: T_JSON_DICT = { + "method": "Memory.startSampling", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_sampling() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Stop collecting native memory profile. + """ + cmd_dict: T_JSON_DICT = { + "method": "Memory.stopSampling", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_all_time_sampling_profile() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, SamplingProfile] +): + """ + Retrieve native memory allocations profile + collected since renderer process startup. + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "Memory.getAllTimeSamplingProfile", + } + json = yield cmd_dict + return SamplingProfile.from_json(json["profile"])
+ + + +
+[docs] +def get_browser_sampling_profile() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, SamplingProfile] +): + """ + Retrieve native memory allocations profile + collected since browser process startup. + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "Memory.getBrowserSamplingProfile", + } + json = yield cmd_dict + return SamplingProfile.from_json(json["profile"])
+ + + +
+[docs] +def get_sampling_profile() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, SamplingProfile] +): + """ + Retrieve native memory allocations profile collected since last + ``startSampling`` call. + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "Memory.getSamplingProfile", + } + json = yield cmd_dict + return SamplingProfile.from_json(json["profile"])
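The sampling commands above form a start/read/stop cycle. A hedged sketch; the 32768-byte interval and 5-second window are arbitrary illustrative values, and ``tab.send()`` is the same assumed driver:

    import asyncio

    from nodriver import cdp

    async def sample_native_allocations(tab, seconds: float = 5.0) -> None:
        await tab.send(cdp.memory.start_sampling(sampling_interval=32768))
        await asyncio.sleep(seconds)
        profile = await tab.send(cdp.memory.get_sampling_profile())
        await tab.send(cdp.memory.stop_sampling())
        # Print the heaviest-looking samples with the tail of their allocation stacks.
        for sample in profile.samples[:10]:
            print(f"{sample.total:>12.0f} bytes  {' > '.join(sample.stack[-3:])}")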
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/network.html b/docs/_build/html/_modules/nodriver/cdp/network.html
new file mode 100644
index 0000000..a8370fe
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/network.html
@@ -0,0 +1,5323 @@
+ nodriver.cdp.network - nodriver documentation

Source code for nodriver.cdp.network

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Network
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import debugger
+from . import emulation
+from . import io
+from . import page
+from . import runtime
+from . import security
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +class ResourceType(enum.Enum): + """ + Resource type as it was perceived by the rendering engine. + """ + + DOCUMENT = "Document" + STYLESHEET = "Stylesheet" + IMAGE = "Image" + MEDIA = "Media" + FONT = "Font" + SCRIPT = "Script" + TEXT_TRACK = "TextTrack" + XHR = "XHR" + FETCH = "Fetch" + PREFETCH = "Prefetch" + EVENT_SOURCE = "EventSource" + WEB_SOCKET = "WebSocket" + MANIFEST = "Manifest" + SIGNED_EXCHANGE = "SignedExchange" + PING = "Ping" + CSP_VIOLATION_REPORT = "CSPViolationReport" + PREFLIGHT = "Preflight" + OTHER = "Other" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ResourceType: + return cls(json)
+ + + +
+[docs] +class LoaderId(str): + """ + Unique loader identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> LoaderId: + return cls(json) + + def __repr__(self): + return "LoaderId({})".format(super().__repr__())
+ + + +
+[docs] +class RequestId(str): + """ + Unique request identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> RequestId: + return cls(json) + + def __repr__(self): + return "RequestId({})".format(super().__repr__())
+ + + +
+[docs] +class InterceptionId(str): + """ + Unique intercepted request identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> InterceptionId: + return cls(json) + + def __repr__(self): + return "InterceptionId({})".format(super().__repr__())
+ + + +
+[docs] +class ErrorReason(enum.Enum): + """ + Network level fetch failure reason. + """ + + FAILED = "Failed" + ABORTED = "Aborted" + TIMED_OUT = "TimedOut" + ACCESS_DENIED = "AccessDenied" + CONNECTION_CLOSED = "ConnectionClosed" + CONNECTION_RESET = "ConnectionReset" + CONNECTION_REFUSED = "ConnectionRefused" + CONNECTION_ABORTED = "ConnectionAborted" + CONNECTION_FAILED = "ConnectionFailed" + NAME_NOT_RESOLVED = "NameNotResolved" + INTERNET_DISCONNECTED = "InternetDisconnected" + ADDRESS_UNREACHABLE = "AddressUnreachable" + BLOCKED_BY_CLIENT = "BlockedByClient" + BLOCKED_BY_RESPONSE = "BlockedByResponse" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ErrorReason: + return cls(json)
+ + + +
+[docs] +class TimeSinceEpoch(float): + """ + UTC time in seconds, counted from January 1, 1970. + """ + + def to_json(self) -> float: + return self + + @classmethod + def from_json(cls, json: float) -> TimeSinceEpoch: + return cls(json) + + def __repr__(self): + return "TimeSinceEpoch({})".format(super().__repr__())
+ + + +
+[docs] +class MonotonicTime(float): + """ + Monotonically increasing time in seconds since an arbitrary point in the past. + """ + + def to_json(self) -> float: + return self + + @classmethod + def from_json(cls, json: float) -> MonotonicTime: + return cls(json) + + def __repr__(self): + return "MonotonicTime({})".format(super().__repr__())
+ + + +
+[docs] +class Headers(dict): + """ + Request / response headers as keys / values of JSON object. + """ + + def to_json(self) -> dict: + return self + + @classmethod + def from_json(cls, json: dict) -> Headers: + return cls(json) + + def __repr__(self): + return "Headers({})".format(super().__repr__())
+ + + +
+[docs] +class ConnectionType(enum.Enum): + """ + The underlying connection technology that the browser is supposedly using. + """ + + NONE = "none" + CELLULAR2G = "cellular2g" + CELLULAR3G = "cellular3g" + CELLULAR4G = "cellular4g" + BLUETOOTH = "bluetooth" + ETHERNET = "ethernet" + WIFI = "wifi" + WIMAX = "wimax" + OTHER = "other" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ConnectionType: + return cls(json)
+ + + +
+[docs] +class CookieSameSite(enum.Enum): + """ + Represents the cookie's 'SameSite' status: + https://tools.ietf.org/html/draft-west-first-party-cookies + """ + + STRICT = "Strict" + LAX = "Lax" + NONE = "None" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CookieSameSite: + return cls(json)
+ + + +
+[docs] +class CookiePriority(enum.Enum): + """ + Represents the cookie's 'Priority' status: + https://tools.ietf.org/html/draft-west-cookie-priority-00 + """ + + LOW = "Low" + MEDIUM = "Medium" + HIGH = "High" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CookiePriority: + return cls(json)
+ + + +
+[docs] +class CookieSourceScheme(enum.Enum): + """ + Represents the source scheme of the origin that originally set the cookie. + A value of "Unset" allows protocol clients to emulate legacy cookie scope for the scheme. + This is a temporary ability and it will be removed in the future. + """ + + UNSET = "Unset" + NON_SECURE = "NonSecure" + SECURE = "Secure" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CookieSourceScheme: + return cls(json)
+ + + +
+[docs] +@dataclass +class ResourceTiming: + """ + Timing information for the request. + """ + + #: Timing's requestTime is a baseline in seconds, while the other numbers are ticks in + #: milliseconds relatively to this requestTime. + request_time: float + + #: Started resolving proxy. + proxy_start: float + + #: Finished resolving proxy. + proxy_end: float + + #: Started DNS address resolve. + dns_start: float + + #: Finished DNS address resolve. + dns_end: float + + #: Started connecting to the remote host. + connect_start: float + + #: Connected to the remote host. + connect_end: float + + #: Started SSL handshake. + ssl_start: float + + #: Finished SSL handshake. + ssl_end: float + + #: Started running ServiceWorker. + worker_start: float + + #: Finished Starting ServiceWorker. + worker_ready: float + + #: Started fetch event. + worker_fetch_start: float + + #: Settled fetch event respondWith promise. + worker_respond_with_settled: float + + #: Started sending request. + send_start: float + + #: Finished sending request. + send_end: float + + #: Time the server started pushing request. + push_start: float + + #: Time the server finished pushing request. + push_end: float + + #: Started receiving response headers. + receive_headers_start: float + + #: Finished receiving response headers. + receive_headers_end: float + + #: Started ServiceWorker static routing source evaluation. + worker_router_evaluation_start: typing.Optional[float] = None + + #: Started cache lookup when the source was evaluated to ``cache``. + worker_cache_lookup_start: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["requestTime"] = self.request_time + json["proxyStart"] = self.proxy_start + json["proxyEnd"] = self.proxy_end + json["dnsStart"] = self.dns_start + json["dnsEnd"] = self.dns_end + json["connectStart"] = self.connect_start + json["connectEnd"] = self.connect_end + json["sslStart"] = self.ssl_start + json["sslEnd"] = self.ssl_end + json["workerStart"] = self.worker_start + json["workerReady"] = self.worker_ready + json["workerFetchStart"] = self.worker_fetch_start + json["workerRespondWithSettled"] = self.worker_respond_with_settled + json["sendStart"] = self.send_start + json["sendEnd"] = self.send_end + json["pushStart"] = self.push_start + json["pushEnd"] = self.push_end + json["receiveHeadersStart"] = self.receive_headers_start + json["receiveHeadersEnd"] = self.receive_headers_end + if self.worker_router_evaluation_start is not None: + json["workerRouterEvaluationStart"] = self.worker_router_evaluation_start + if self.worker_cache_lookup_start is not None: + json["workerCacheLookupStart"] = self.worker_cache_lookup_start + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ResourceTiming: + return cls( + request_time=float(json["requestTime"]), + proxy_start=float(json["proxyStart"]), + proxy_end=float(json["proxyEnd"]), + dns_start=float(json["dnsStart"]), + dns_end=float(json["dnsEnd"]), + connect_start=float(json["connectStart"]), + connect_end=float(json["connectEnd"]), + ssl_start=float(json["sslStart"]), + ssl_end=float(json["sslEnd"]), + worker_start=float(json["workerStart"]), + worker_ready=float(json["workerReady"]), + worker_fetch_start=float(json["workerFetchStart"]), + worker_respond_with_settled=float(json["workerRespondWithSettled"]), + send_start=float(json["sendStart"]), + send_end=float(json["sendEnd"]), + push_start=float(json["pushStart"]), + push_end=float(json["pushEnd"]), + 
receive_headers_start=float(json["receiveHeadersStart"]), + receive_headers_end=float(json["receiveHeadersEnd"]), + worker_router_evaluation_start=( + float(json["workerRouterEvaluationStart"]) + if json.get("workerRouterEvaluationStart", None) is not None + else None + ), + worker_cache_lookup_start=( + float(json["workerCacheLookupStart"]) + if json.get("workerCacheLookupStart", None) is not None + else None + ), + )
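Per the docstring above, ``request_time`` is an absolute baseline in seconds while the remaining fields are millisecond offsets relative to it, so per-phase durations are simple differences. A plain-Python sketch (treating negative offsets as "phase did not occur" is an assumption about the sentinel value, not something stated in this module):

    import typing

    from nodriver.cdp.network import ResourceTiming

    def phase_durations_ms(t: ResourceTiming) -> typing.Dict[str, typing.Optional[float]]:
        def span(start: float, end: float) -> typing.Optional[float]:
            # Skip phases where either endpoint looks unset (negative offset).
            return end - start if start >= 0 and end >= 0 else None

        return {
            "dns": span(t.dns_start, t.dns_end),
            "connect": span(t.connect_start, t.connect_end),
            "ssl": span(t.ssl_start, t.ssl_end),
            "send": span(t.send_start, t.send_end),
            "wait": span(t.send_end, t.receive_headers_start),
        }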
+ + + +
+[docs] +class ResourcePriority(enum.Enum): + """ + Loading priority of a resource request. + """ + + VERY_LOW = "VeryLow" + LOW = "Low" + MEDIUM = "Medium" + HIGH = "High" + VERY_HIGH = "VeryHigh" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ResourcePriority: + return cls(json)
+ + + +
+[docs] +@dataclass +class PostDataEntry: + """ + Post data entry for HTTP request + """ + + bytes_: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.bytes_ is not None: + json["bytes"] = self.bytes_ + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PostDataEntry: + return cls( + bytes_=str(json["bytes"]) if json.get("bytes", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class Request: + """ + HTTP request data. + """ + + #: Request URL (without fragment). + url: str + + #: HTTP request method. + method: str + + #: HTTP request headers. + headers: Headers + + #: Priority of the resource request at the time request is sent. + initial_priority: ResourcePriority + + #: The referrer policy of the request, as defined in https://www.w3.org/TR/referrer-policy/ + referrer_policy: str + + #: Fragment of the requested URL starting with hash, if present. + url_fragment: typing.Optional[str] = None + + #: HTTP POST request data. + #: Use postDataEntries instead. + post_data: typing.Optional[str] = None + + #: True when the request has POST data. Note that postData might still be omitted when this flag is true when the data is too long. + has_post_data: typing.Optional[bool] = None + + #: Request body elements (post data broken into individual entries). + post_data_entries: typing.Optional[typing.List[PostDataEntry]] = None + + #: The mixed content type of the request. + mixed_content_type: typing.Optional[security.MixedContentType] = None + + #: Whether is loaded via link preload. + is_link_preload: typing.Optional[bool] = None + + #: Set for requests when the TrustToken API is used. Contains the parameters + #: passed by the developer (e.g. via "fetch") as understood by the backend. + trust_token_params: typing.Optional[TrustTokenParams] = None + + #: True if this resource request is considered to be the 'same site' as the + #: request corresponding to the main frame. + is_same_site: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["method"] = self.method + json["headers"] = self.headers.to_json() + json["initialPriority"] = self.initial_priority.to_json() + json["referrerPolicy"] = self.referrer_policy + if self.url_fragment is not None: + json["urlFragment"] = self.url_fragment + if self.post_data is not None: + json["postData"] = self.post_data + if self.has_post_data is not None: + json["hasPostData"] = self.has_post_data + if self.post_data_entries is not None: + json["postDataEntries"] = [i.to_json() for i in self.post_data_entries] + if self.mixed_content_type is not None: + json["mixedContentType"] = self.mixed_content_type.to_json() + if self.is_link_preload is not None: + json["isLinkPreload"] = self.is_link_preload + if self.trust_token_params is not None: + json["trustTokenParams"] = self.trust_token_params.to_json() + if self.is_same_site is not None: + json["isSameSite"] = self.is_same_site + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Request: + return cls( + url=str(json["url"]), + method=str(json["method"]), + headers=Headers.from_json(json["headers"]), + initial_priority=ResourcePriority.from_json(json["initialPriority"]), + referrer_policy=str(json["referrerPolicy"]), + url_fragment=( + str(json["urlFragment"]) + if json.get("urlFragment", None) is not None + else None + ), + post_data=( + str(json["postData"]) + if json.get("postData", None) is not None + else None + ), + has_post_data=( + bool(json["hasPostData"]) + if json.get("hasPostData", None) is not None + else None + ), + post_data_entries=( + [PostDataEntry.from_json(i) for i in json["postDataEntries"]] + if json.get("postDataEntries", None) is not None + else None + ), + mixed_content_type=( + security.MixedContentType.from_json(json["mixedContentType"]) + if json.get("mixedContentType", None) is not None + else None + ), + is_link_preload=( + 
bool(json["isLinkPreload"]) + if json.get("isLinkPreload", None) is not None + else None + ), + trust_token_params=( + TrustTokenParams.from_json(json["trustTokenParams"]) + if json.get("trustTokenParams", None) is not None + else None + ), + is_same_site=( + bool(json["isSameSite"]) + if json.get("isSameSite", None) is not None + else None + ), + )
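``Request.from_json()`` consumes the camelCase wire shape and exposes snake_case attributes, and ``to_json()`` reverses it while dropping optional fields left at ``None``. A small round-trip sketch with a hypothetical payload (the URL and header values are illustrative only):

    from nodriver.cdp.network import Request

    raw = {
        "url": "https://example.com/api",
        "method": "GET",
        "headers": {"Accept": "application/json"},
        "initialPriority": "High",
        "referrerPolicy": "strict-origin-when-cross-origin",
        "hasPostData": False,
    }

    req = Request.from_json(raw)
    assert req.initial_priority.value == "High"
    assert req.has_post_data is False
    assert req.to_json()["referrerPolicy"] == "strict-origin-when-cross-origin"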
+ + + +
+[docs] +@dataclass +class SignedCertificateTimestamp: + """ + Details of a signed certificate timestamp (SCT). + """ + + #: Validation status. + status: str + + #: Origin. + origin: str + + #: Log name / description. + log_description: str + + #: Log ID. + log_id: str + + #: Issuance date. Unlike TimeSinceEpoch, this contains the number of + #: milliseconds since January 1, 1970, UTC, not the number of seconds. + timestamp: float + + #: Hash algorithm. + hash_algorithm: str + + #: Signature algorithm. + signature_algorithm: str + + #: Signature data. + signature_data: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["status"] = self.status + json["origin"] = self.origin + json["logDescription"] = self.log_description + json["logId"] = self.log_id + json["timestamp"] = self.timestamp + json["hashAlgorithm"] = self.hash_algorithm + json["signatureAlgorithm"] = self.signature_algorithm + json["signatureData"] = self.signature_data + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SignedCertificateTimestamp: + return cls( + status=str(json["status"]), + origin=str(json["origin"]), + log_description=str(json["logDescription"]), + log_id=str(json["logId"]), + timestamp=float(json["timestamp"]), + hash_algorithm=str(json["hashAlgorithm"]), + signature_algorithm=str(json["signatureAlgorithm"]), + signature_data=str(json["signatureData"]), + )
+ + + +
+[docs] +@dataclass +class SecurityDetails: + """ + Security details about a request. + """ + + #: Protocol name (e.g. "TLS 1.2" or "QUIC"). + protocol: str + + #: Key Exchange used by the connection, or the empty string if not applicable. + key_exchange: str + + #: Cipher name. + cipher: str + + #: Certificate ID value. + certificate_id: security.CertificateId + + #: Certificate subject name. + subject_name: str + + #: Subject Alternative Name (SAN) DNS names and IP addresses. + san_list: typing.List[str] + + #: Name of the issuing CA. + issuer: str + + #: Certificate valid from date. + valid_from: TimeSinceEpoch + + #: Certificate valid to (expiration) date + valid_to: TimeSinceEpoch + + #: List of signed certificate timestamps (SCTs). + signed_certificate_timestamp_list: typing.List[SignedCertificateTimestamp] + + #: Whether the request complied with Certificate Transparency policy + certificate_transparency_compliance: CertificateTransparencyCompliance + + #: Whether the connection used Encrypted ClientHello + encrypted_client_hello: bool + + #: (EC)DH group used by the connection, if applicable. + key_exchange_group: typing.Optional[str] = None + + #: TLS MAC. Note that AEAD ciphers do not have separate MACs. + mac: typing.Optional[str] = None + + #: The signature algorithm used by the server in the TLS server signature, + #: represented as a TLS SignatureScheme code point. Omitted if not + #: applicable or not known. + server_signature_algorithm: typing.Optional[int] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["protocol"] = self.protocol + json["keyExchange"] = self.key_exchange + json["cipher"] = self.cipher + json["certificateId"] = self.certificate_id.to_json() + json["subjectName"] = self.subject_name + json["sanList"] = [i for i in self.san_list] + json["issuer"] = self.issuer + json["validFrom"] = self.valid_from.to_json() + json["validTo"] = self.valid_to.to_json() + json["signedCertificateTimestampList"] = [ + i.to_json() for i in self.signed_certificate_timestamp_list + ] + json["certificateTransparencyCompliance"] = ( + self.certificate_transparency_compliance.to_json() + ) + json["encryptedClientHello"] = self.encrypted_client_hello + if self.key_exchange_group is not None: + json["keyExchangeGroup"] = self.key_exchange_group + if self.mac is not None: + json["mac"] = self.mac + if self.server_signature_algorithm is not None: + json["serverSignatureAlgorithm"] = self.server_signature_algorithm + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SecurityDetails: + return cls( + protocol=str(json["protocol"]), + key_exchange=str(json["keyExchange"]), + cipher=str(json["cipher"]), + certificate_id=security.CertificateId.from_json(json["certificateId"]), + subject_name=str(json["subjectName"]), + san_list=[str(i) for i in json["sanList"]], + issuer=str(json["issuer"]), + valid_from=TimeSinceEpoch.from_json(json["validFrom"]), + valid_to=TimeSinceEpoch.from_json(json["validTo"]), + signed_certificate_timestamp_list=[ + SignedCertificateTimestamp.from_json(i) + for i in json["signedCertificateTimestampList"] + ], + certificate_transparency_compliance=CertificateTransparencyCompliance.from_json( + json["certificateTransparencyCompliance"] + ), + encrypted_client_hello=bool(json["encryptedClientHello"]), + key_exchange_group=( + str(json["keyExchangeGroup"]) + if json.get("keyExchangeGroup", None) is not None + else None + ), + mac=str(json["mac"]) if json.get("mac", None) is not None else None, + 
server_signature_algorithm=( + int(json["serverSignatureAlgorithm"]) + if json.get("serverSignatureAlgorithm", None) is not None + else None + ), + )
+ + + +
+[docs] +class CertificateTransparencyCompliance(enum.Enum): + """ + Whether the request complied with Certificate Transparency policy. + """ + + UNKNOWN = "unknown" + NOT_COMPLIANT = "not-compliant" + COMPLIANT = "compliant" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CertificateTransparencyCompliance: + return cls(json)
+ + + +
+[docs] +class BlockedReason(enum.Enum): + """ + The reason why request was blocked. + """ + + OTHER = "other" + CSP = "csp" + MIXED_CONTENT = "mixed-content" + ORIGIN = "origin" + INSPECTOR = "inspector" + SUBRESOURCE_FILTER = "subresource-filter" + CONTENT_TYPE = "content-type" + COEP_FRAME_RESOURCE_NEEDS_COEP_HEADER = "coep-frame-resource-needs-coep-header" + COOP_SANDBOXED_IFRAME_CANNOT_NAVIGATE_TO_COOP_PAGE = ( + "coop-sandboxed-iframe-cannot-navigate-to-coop-page" + ) + CORP_NOT_SAME_ORIGIN = "corp-not-same-origin" + CORP_NOT_SAME_ORIGIN_AFTER_DEFAULTED_TO_SAME_ORIGIN_BY_COEP = ( + "corp-not-same-origin-after-defaulted-to-same-origin-by-coep" + ) + CORP_NOT_SAME_ORIGIN_AFTER_DEFAULTED_TO_SAME_ORIGIN_BY_DIP = ( + "corp-not-same-origin-after-defaulted-to-same-origin-by-dip" + ) + CORP_NOT_SAME_ORIGIN_AFTER_DEFAULTED_TO_SAME_ORIGIN_BY_COEP_AND_DIP = ( + "corp-not-same-origin-after-defaulted-to-same-origin-by-coep-and-dip" + ) + CORP_NOT_SAME_SITE = "corp-not-same-site" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> BlockedReason: + return cls(json)
+ + + +
+[docs] +class CorsError(enum.Enum): + """ + The reason why request was blocked. + """ + + DISALLOWED_BY_MODE = "DisallowedByMode" + INVALID_RESPONSE = "InvalidResponse" + WILDCARD_ORIGIN_NOT_ALLOWED = "WildcardOriginNotAllowed" + MISSING_ALLOW_ORIGIN_HEADER = "MissingAllowOriginHeader" + MULTIPLE_ALLOW_ORIGIN_VALUES = "MultipleAllowOriginValues" + INVALID_ALLOW_ORIGIN_VALUE = "InvalidAllowOriginValue" + ALLOW_ORIGIN_MISMATCH = "AllowOriginMismatch" + INVALID_ALLOW_CREDENTIALS = "InvalidAllowCredentials" + CORS_DISABLED_SCHEME = "CorsDisabledScheme" + PREFLIGHT_INVALID_STATUS = "PreflightInvalidStatus" + PREFLIGHT_DISALLOWED_REDIRECT = "PreflightDisallowedRedirect" + PREFLIGHT_WILDCARD_ORIGIN_NOT_ALLOWED = "PreflightWildcardOriginNotAllowed" + PREFLIGHT_MISSING_ALLOW_ORIGIN_HEADER = "PreflightMissingAllowOriginHeader" + PREFLIGHT_MULTIPLE_ALLOW_ORIGIN_VALUES = "PreflightMultipleAllowOriginValues" + PREFLIGHT_INVALID_ALLOW_ORIGIN_VALUE = "PreflightInvalidAllowOriginValue" + PREFLIGHT_ALLOW_ORIGIN_MISMATCH = "PreflightAllowOriginMismatch" + PREFLIGHT_INVALID_ALLOW_CREDENTIALS = "PreflightInvalidAllowCredentials" + PREFLIGHT_MISSING_ALLOW_EXTERNAL = "PreflightMissingAllowExternal" + PREFLIGHT_INVALID_ALLOW_EXTERNAL = "PreflightInvalidAllowExternal" + PREFLIGHT_MISSING_ALLOW_PRIVATE_NETWORK = "PreflightMissingAllowPrivateNetwork" + PREFLIGHT_INVALID_ALLOW_PRIVATE_NETWORK = "PreflightInvalidAllowPrivateNetwork" + INVALID_ALLOW_METHODS_PREFLIGHT_RESPONSE = "InvalidAllowMethodsPreflightResponse" + INVALID_ALLOW_HEADERS_PREFLIGHT_RESPONSE = "InvalidAllowHeadersPreflightResponse" + METHOD_DISALLOWED_BY_PREFLIGHT_RESPONSE = "MethodDisallowedByPreflightResponse" + HEADER_DISALLOWED_BY_PREFLIGHT_RESPONSE = "HeaderDisallowedByPreflightResponse" + REDIRECT_CONTAINS_CREDENTIALS = "RedirectContainsCredentials" + INSECURE_PRIVATE_NETWORK = "InsecurePrivateNetwork" + INVALID_PRIVATE_NETWORK_ACCESS = "InvalidPrivateNetworkAccess" + UNEXPECTED_PRIVATE_NETWORK_ACCESS = "UnexpectedPrivateNetworkAccess" + NO_CORS_REDIRECT_MODE_NOT_FOLLOW = "NoCorsRedirectModeNotFollow" + PREFLIGHT_MISSING_PRIVATE_NETWORK_ACCESS_ID = ( + "PreflightMissingPrivateNetworkAccessId" + ) + PREFLIGHT_MISSING_PRIVATE_NETWORK_ACCESS_NAME = ( + "PreflightMissingPrivateNetworkAccessName" + ) + PRIVATE_NETWORK_ACCESS_PERMISSION_UNAVAILABLE = ( + "PrivateNetworkAccessPermissionUnavailable" + ) + PRIVATE_NETWORK_ACCESS_PERMISSION_DENIED = "PrivateNetworkAccessPermissionDenied" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CorsError: + return cls(json)
+ + + +
+[docs] +@dataclass +class CorsErrorStatus: + cors_error: CorsError + + failed_parameter: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["corsError"] = self.cors_error.to_json() + json["failedParameter"] = self.failed_parameter + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CorsErrorStatus: + return cls( + cors_error=CorsError.from_json(json["corsError"]), + failed_parameter=str(json["failedParameter"]), + )
+ + + +
+[docs] +class ServiceWorkerResponseSource(enum.Enum): + """ + Source of serviceworker response. + """ + + CACHE_STORAGE = "cache-storage" + HTTP_CACHE = "http-cache" + FALLBACK_CODE = "fallback-code" + NETWORK = "network" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ServiceWorkerResponseSource: + return cls(json)
+ + + +
+[docs] +@dataclass +class TrustTokenParams: + """ + Determines what type of Trust Token operation is executed and + depending on the type, some additional parameters. The values + are specified in third_party/blink/renderer/core/fetch/trust_token.idl. + """ + + operation: TrustTokenOperationType + + #: Only set for "token-redemption" operation and determine whether + #: to request a fresh SRR or use a still valid cached SRR. + refresh_policy: str + + #: Origins of issuers from whom to request tokens or redemption + #: records. + issuers: typing.Optional[typing.List[str]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["operation"] = self.operation.to_json() + json["refreshPolicy"] = self.refresh_policy + if self.issuers is not None: + json["issuers"] = [i for i in self.issuers] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TrustTokenParams: + return cls( + operation=TrustTokenOperationType.from_json(json["operation"]), + refresh_policy=str(json["refreshPolicy"]), + issuers=( + [str(i) for i in json["issuers"]] + if json.get("issuers", None) is not None + else None + ), + )
+ + + +
+[docs] +class TrustTokenOperationType(enum.Enum): + ISSUANCE = "Issuance" + REDEMPTION = "Redemption" + SIGNING = "Signing" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> TrustTokenOperationType: + return cls(json)
+ + + +
+[docs] +class AlternateProtocolUsage(enum.Enum): + """ + The reason why Chrome uses a specific transport protocol for HTTP semantics. + """ + + ALTERNATIVE_JOB_WON_WITHOUT_RACE = "alternativeJobWonWithoutRace" + ALTERNATIVE_JOB_WON_RACE = "alternativeJobWonRace" + MAIN_JOB_WON_RACE = "mainJobWonRace" + MAPPING_MISSING = "mappingMissing" + BROKEN = "broken" + DNS_ALPN_H3_JOB_WON_WITHOUT_RACE = "dnsAlpnH3JobWonWithoutRace" + DNS_ALPN_H3_JOB_WON_RACE = "dnsAlpnH3JobWonRace" + UNSPECIFIED_REASON = "unspecifiedReason" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AlternateProtocolUsage: + return cls(json)
+ + + +
+[docs] +class ServiceWorkerRouterSource(enum.Enum): + """ + Source of service worker router. + """ + + NETWORK = "network" + CACHE = "cache" + FETCH_EVENT = "fetch-event" + RACE_NETWORK_AND_FETCH_HANDLER = "race-network-and-fetch-handler" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ServiceWorkerRouterSource: + return cls(json)
+ + + +
+[docs] +@dataclass +class ServiceWorkerRouterInfo: + #: ID of the rule matched. If there is a matched rule, this field will + #: be set, otherwiser no value will be set. + rule_id_matched: typing.Optional[int] = None + + #: The router source of the matched rule. If there is a matched rule, this + #: field will be set, otherwise no value will be set. + matched_source_type: typing.Optional[ServiceWorkerRouterSource] = None + + #: The actual router source used. + actual_source_type: typing.Optional[ServiceWorkerRouterSource] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.rule_id_matched is not None: + json["ruleIdMatched"] = self.rule_id_matched + if self.matched_source_type is not None: + json["matchedSourceType"] = self.matched_source_type.to_json() + if self.actual_source_type is not None: + json["actualSourceType"] = self.actual_source_type.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ServiceWorkerRouterInfo: + return cls( + rule_id_matched=( + int(json["ruleIdMatched"]) + if json.get("ruleIdMatched", None) is not None + else None + ), + matched_source_type=( + ServiceWorkerRouterSource.from_json(json["matchedSourceType"]) + if json.get("matchedSourceType", None) is not None + else None + ), + actual_source_type=( + ServiceWorkerRouterSource.from_json(json["actualSourceType"]) + if json.get("actualSourceType", None) is not None + else None + ), + )
+ + + +
+[docs]
+@dataclass
+class Response:
+    """
+    HTTP response data.
+    """
+
+    #: Response URL. This URL can be different from CachedResource.url in case of redirect.
+    url: str
+
+    #: HTTP response status code.
+    status: int
+
+    #: HTTP response status text.
+    status_text: str
+
+    #: HTTP response headers.
+    headers: Headers
+
+    #: Resource mimeType as determined by the browser.
+    mime_type: str
+
+    #: Resource charset as determined by the browser (if applicable).
+    charset: str
+
+    #: Specifies whether physical connection was actually reused for this request.
+    connection_reused: bool
+
+    #: Physical connection id that was actually used for this request.
+    connection_id: float
+
+    #: Total number of bytes received for this request so far.
+    encoded_data_length: float
+
+    #: Security state of the request resource.
+    security_state: security.SecurityState
+
+    #: HTTP response headers text. This has been replaced by the headers in Network.responseReceivedExtraInfo.
+    headers_text: typing.Optional[str] = None
+
+    #: Refined HTTP request headers that were actually transmitted over the network.
+    request_headers: typing.Optional[Headers] = None
+
+    #: HTTP request headers text. This has been replaced by the headers in Network.requestWillBeSentExtraInfo.
+    request_headers_text: typing.Optional[str] = None
+
+    #: Remote IP address.
+    remote_ip_address: typing.Optional[str] = None
+
+    #: Remote port.
+    remote_port: typing.Optional[int] = None
+
+    #: Specifies that the request was served from the disk cache.
+    from_disk_cache: typing.Optional[bool] = None
+
+    #: Specifies that the request was served from the ServiceWorker.
+    from_service_worker: typing.Optional[bool] = None
+
+    #: Specifies that the request was served from the prefetch cache.
+    from_prefetch_cache: typing.Optional[bool] = None
+
+    #: Specifies that the request was served from early hints.
+    from_early_hints: typing.Optional[bool] = None
+
+    #: Information about how ServiceWorker Static Router API was used. If this
+    #: field is set with ``matchedSourceType`` field, a matching rule is found.
+    #: If this field is set without ``matchedSourceType``, no matching rule is found.
+    #: Otherwise, the API is not used.
+    service_worker_router_info: typing.Optional[ServiceWorkerRouterInfo] = None
+
+    #: Timing information for the given request.
+    timing: typing.Optional[ResourceTiming] = None
+
+    #: Response source of response from ServiceWorker.
+    service_worker_response_source: typing.Optional[ServiceWorkerResponseSource] = None
+
+    #: The time at which the returned response was generated.
+    response_time: typing.Optional[TimeSinceEpoch] = None
+
+    #: Cache Storage Cache Name.
+    cache_storage_cache_name: typing.Optional[str] = None
+
+    #: Protocol used to fetch this request.
+    protocol: typing.Optional[str] = None
+
+    #: The reason why Chrome uses a specific transport protocol for HTTP semantics.
+    alternate_protocol_usage: typing.Optional[AlternateProtocolUsage] = None
+
+    #: Security details for the request.
+ security_details: typing.Optional[SecurityDetails] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["status"] = self.status + json["statusText"] = self.status_text + json["headers"] = self.headers.to_json() + json["mimeType"] = self.mime_type + json["charset"] = self.charset + json["connectionReused"] = self.connection_reused + json["connectionId"] = self.connection_id + json["encodedDataLength"] = self.encoded_data_length + json["securityState"] = self.security_state.to_json() + if self.headers_text is not None: + json["headersText"] = self.headers_text + if self.request_headers is not None: + json["requestHeaders"] = self.request_headers.to_json() + if self.request_headers_text is not None: + json["requestHeadersText"] = self.request_headers_text + if self.remote_ip_address is not None: + json["remoteIPAddress"] = self.remote_ip_address + if self.remote_port is not None: + json["remotePort"] = self.remote_port + if self.from_disk_cache is not None: + json["fromDiskCache"] = self.from_disk_cache + if self.from_service_worker is not None: + json["fromServiceWorker"] = self.from_service_worker + if self.from_prefetch_cache is not None: + json["fromPrefetchCache"] = self.from_prefetch_cache + if self.from_early_hints is not None: + json["fromEarlyHints"] = self.from_early_hints + if self.service_worker_router_info is not None: + json["serviceWorkerRouterInfo"] = self.service_worker_router_info.to_json() + if self.timing is not None: + json["timing"] = self.timing.to_json() + if self.service_worker_response_source is not None: + json["serviceWorkerResponseSource"] = ( + self.service_worker_response_source.to_json() + ) + if self.response_time is not None: + json["responseTime"] = self.response_time.to_json() + if self.cache_storage_cache_name is not None: + json["cacheStorageCacheName"] = self.cache_storage_cache_name + if self.protocol is not None: + json["protocol"] = self.protocol + if self.alternate_protocol_usage is not None: + json["alternateProtocolUsage"] = self.alternate_protocol_usage.to_json() + if self.security_details is not None: + json["securityDetails"] = self.security_details.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Response: + return cls( + url=str(json["url"]), + status=int(json["status"]), + status_text=str(json["statusText"]), + headers=Headers.from_json(json["headers"]), + mime_type=str(json["mimeType"]), + charset=str(json["charset"]), + connection_reused=bool(json["connectionReused"]), + connection_id=float(json["connectionId"]), + encoded_data_length=float(json["encodedDataLength"]), + security_state=security.SecurityState.from_json(json["securityState"]), + headers_text=( + str(json["headersText"]) + if json.get("headersText", None) is not None + else None + ), + request_headers=( + Headers.from_json(json["requestHeaders"]) + if json.get("requestHeaders", None) is not None + else None + ), + request_headers_text=( + str(json["requestHeadersText"]) + if json.get("requestHeadersText", None) is not None + else None + ), + remote_ip_address=( + str(json["remoteIPAddress"]) + if json.get("remoteIPAddress", None) is not None + else None + ), + remote_port=( + int(json["remotePort"]) + if json.get("remotePort", None) is not None + else None + ), + from_disk_cache=( + bool(json["fromDiskCache"]) + if json.get("fromDiskCache", None) is not None + else None + ), + from_service_worker=( + bool(json["fromServiceWorker"]) + if json.get("fromServiceWorker", None) is not None + 
else None + ), + from_prefetch_cache=( + bool(json["fromPrefetchCache"]) + if json.get("fromPrefetchCache", None) is not None + else None + ), + from_early_hints=( + bool(json["fromEarlyHints"]) + if json.get("fromEarlyHints", None) is not None + else None + ), + service_worker_router_info=( + ServiceWorkerRouterInfo.from_json(json["serviceWorkerRouterInfo"]) + if json.get("serviceWorkerRouterInfo", None) is not None + else None + ), + timing=( + ResourceTiming.from_json(json["timing"]) + if json.get("timing", None) is not None + else None + ), + service_worker_response_source=( + ServiceWorkerResponseSource.from_json( + json["serviceWorkerResponseSource"] + ) + if json.get("serviceWorkerResponseSource", None) is not None + else None + ), + response_time=( + TimeSinceEpoch.from_json(json["responseTime"]) + if json.get("responseTime", None) is not None + else None + ), + cache_storage_cache_name=( + str(json["cacheStorageCacheName"]) + if json.get("cacheStorageCacheName", None) is not None + else None + ), + protocol=( + str(json["protocol"]) + if json.get("protocol", None) is not None + else None + ), + alternate_protocol_usage=( + AlternateProtocolUsage.from_json(json["alternateProtocolUsage"]) + if json.get("alternateProtocolUsage", None) is not None + else None + ), + security_details=( + SecurityDetails.from_json(json["securityDetails"]) + if json.get("securityDetails", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class WebSocketRequest: + """ + WebSocket request data. + """ + + #: HTTP request headers. + headers: Headers + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["headers"] = self.headers.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketRequest: + return cls( + headers=Headers.from_json(json["headers"]), + )
+ + + +
+[docs] +@dataclass +class WebSocketResponse: + """ + WebSocket response data. + """ + + #: HTTP response status code. + status: int + + #: HTTP response status text. + status_text: str + + #: HTTP response headers. + headers: Headers + + #: HTTP response headers text. + headers_text: typing.Optional[str] = None + + #: HTTP request headers. + request_headers: typing.Optional[Headers] = None + + #: HTTP request headers text. + request_headers_text: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["status"] = self.status + json["statusText"] = self.status_text + json["headers"] = self.headers.to_json() + if self.headers_text is not None: + json["headersText"] = self.headers_text + if self.request_headers is not None: + json["requestHeaders"] = self.request_headers.to_json() + if self.request_headers_text is not None: + json["requestHeadersText"] = self.request_headers_text + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketResponse: + return cls( + status=int(json["status"]), + status_text=str(json["statusText"]), + headers=Headers.from_json(json["headers"]), + headers_text=( + str(json["headersText"]) + if json.get("headersText", None) is not None + else None + ), + request_headers=( + Headers.from_json(json["requestHeaders"]) + if json.get("requestHeaders", None) is not None + else None + ), + request_headers_text=( + str(json["requestHeadersText"]) + if json.get("requestHeadersText", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class WebSocketFrame: + """ + WebSocket message data. This represents an entire WebSocket message, not just a fragmented frame as the name suggests. + """ + + #: WebSocket message opcode. + opcode: float + + #: WebSocket message mask. + mask: bool + + #: WebSocket message payload data. + #: If the opcode is 1, this is a text message and payloadData is a UTF-8 string. + #: If the opcode isn't 1, then payloadData is a base64 encoded string representing binary data. + payload_data: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["opcode"] = self.opcode + json["mask"] = self.mask + json["payloadData"] = self.payload_data + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketFrame: + return cls( + opcode=float(json["opcode"]), + mask=bool(json["mask"]), + payload_data=str(json["payloadData"]), + )
+ + + +
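The opcode/payload convention described in the comments above can be handled with a small helper. A minimal sketch using only the standard library and the dataclass defined above (the helper name ``decode_payload`` is illustrative, not part of the module):

import base64

def decode_payload(frame: WebSocketFrame) -> bytes:
    # Opcode 1 is a text message: payload_data is already a UTF-8 string.
    if frame.opcode == 1:
        return frame.payload_data.encode("utf-8")
    # Any other opcode carries base64-encoded binary data.
    return base64.b64decode(frame.payload_data)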
+[docs] +@dataclass +class CachedResource: + """ + Information about the cached resource. + """ + + #: Resource URL. This is the url of the original network request. + url: str + + #: Type of this resource. + type_: ResourceType + + #: Cached response body size. + body_size: float + + #: Cached response data. + response: typing.Optional[Response] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["type"] = self.type_.to_json() + json["bodySize"] = self.body_size + if self.response is not None: + json["response"] = self.response.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CachedResource: + return cls( + url=str(json["url"]), + type_=ResourceType.from_json(json["type"]), + body_size=float(json["bodySize"]), + response=( + Response.from_json(json["response"]) + if json.get("response", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class Initiator: + """ + Information about the request initiator. + """ + + #: Type of this initiator. + type_: str + + #: Initiator JavaScript stack trace, set for Script only. + stack: typing.Optional[runtime.StackTrace] = None + + #: Initiator URL, set for Parser type or for Script type (when script is importing module) or for SignedExchange type. + url: typing.Optional[str] = None + + #: Initiator line number, set for Parser type or for Script type (when script is importing + #: module) (0-based). + line_number: typing.Optional[float] = None + + #: Initiator column number, set for Parser type or for Script type (when script is importing + #: module) (0-based). + column_number: typing.Optional[float] = None + + #: Set if another request triggered this request (e.g. preflight). + request_id: typing.Optional[RequestId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + if self.stack is not None: + json["stack"] = self.stack.to_json() + if self.url is not None: + json["url"] = self.url + if self.line_number is not None: + json["lineNumber"] = self.line_number + if self.column_number is not None: + json["columnNumber"] = self.column_number + if self.request_id is not None: + json["requestId"] = self.request_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Initiator: + return cls( + type_=str(json["type"]), + stack=( + runtime.StackTrace.from_json(json["stack"]) + if json.get("stack", None) is not None + else None + ), + url=str(json["url"]) if json.get("url", None) is not None else None, + line_number=( + float(json["lineNumber"]) + if json.get("lineNumber", None) is not None + else None + ), + column_number=( + float(json["columnNumber"]) + if json.get("columnNumber", None) is not None + else None + ), + request_id=( + RequestId.from_json(json["requestId"]) + if json.get("requestId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CookiePartitionKey: + """ + cookiePartitionKey object + The representation of the components of the key that are created by the cookiePartitionKey class contained in net/cookies/cookie_partition_key.h. + """ + + #: The site of the top-level URL the browser was visiting at the start + #: of the request to the endpoint that set the cookie. + top_level_site: str + + #: Indicates if the cookie has any ancestors that are cross-site to the topLevelSite. + has_cross_site_ancestor: bool + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["topLevelSite"] = self.top_level_site + json["hasCrossSiteAncestor"] = self.has_cross_site_ancestor + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CookiePartitionKey: + return cls( + top_level_site=str(json["topLevelSite"]), + has_cross_site_ancestor=bool(json["hasCrossSiteAncestor"]), + )
+ + + + + + + +
+[docs] +class SetCookieBlockedReason(enum.Enum): + """ + Types of reasons why a cookie may not be stored from a response. + """ + + SECURE_ONLY = "SecureOnly" + SAME_SITE_STRICT = "SameSiteStrict" + SAME_SITE_LAX = "SameSiteLax" + SAME_SITE_UNSPECIFIED_TREATED_AS_LAX = "SameSiteUnspecifiedTreatedAsLax" + SAME_SITE_NONE_INSECURE = "SameSiteNoneInsecure" + USER_PREFERENCES = "UserPreferences" + THIRD_PARTY_PHASEOUT = "ThirdPartyPhaseout" + THIRD_PARTY_BLOCKED_IN_FIRST_PARTY_SET = "ThirdPartyBlockedInFirstPartySet" + SYNTAX_ERROR = "SyntaxError" + SCHEME_NOT_SUPPORTED = "SchemeNotSupported" + OVERWRITE_SECURE = "OverwriteSecure" + INVALID_DOMAIN = "InvalidDomain" + INVALID_PREFIX = "InvalidPrefix" + UNKNOWN_ERROR = "UnknownError" + SCHEMEFUL_SAME_SITE_STRICT = "SchemefulSameSiteStrict" + SCHEMEFUL_SAME_SITE_LAX = "SchemefulSameSiteLax" + SCHEMEFUL_SAME_SITE_UNSPECIFIED_TREATED_AS_LAX = ( + "SchemefulSameSiteUnspecifiedTreatedAsLax" + ) + SAME_PARTY_FROM_CROSS_PARTY_CONTEXT = "SamePartyFromCrossPartyContext" + SAME_PARTY_CONFLICTS_WITH_OTHER_ATTRIBUTES = "SamePartyConflictsWithOtherAttributes" + NAME_VALUE_PAIR_EXCEEDS_MAX_SIZE = "NameValuePairExceedsMaxSize" + DISALLOWED_CHARACTER = "DisallowedCharacter" + NO_COOKIE_CONTENT = "NoCookieContent" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SetCookieBlockedReason: + return cls(json)
+ + + +
+[docs] +class CookieBlockedReason(enum.Enum): + """ + Types of reasons why a cookie may not be sent with a request. + """ + + SECURE_ONLY = "SecureOnly" + NOT_ON_PATH = "NotOnPath" + DOMAIN_MISMATCH = "DomainMismatch" + SAME_SITE_STRICT = "SameSiteStrict" + SAME_SITE_LAX = "SameSiteLax" + SAME_SITE_UNSPECIFIED_TREATED_AS_LAX = "SameSiteUnspecifiedTreatedAsLax" + SAME_SITE_NONE_INSECURE = "SameSiteNoneInsecure" + USER_PREFERENCES = "UserPreferences" + THIRD_PARTY_PHASEOUT = "ThirdPartyPhaseout" + THIRD_PARTY_BLOCKED_IN_FIRST_PARTY_SET = "ThirdPartyBlockedInFirstPartySet" + UNKNOWN_ERROR = "UnknownError" + SCHEMEFUL_SAME_SITE_STRICT = "SchemefulSameSiteStrict" + SCHEMEFUL_SAME_SITE_LAX = "SchemefulSameSiteLax" + SCHEMEFUL_SAME_SITE_UNSPECIFIED_TREATED_AS_LAX = ( + "SchemefulSameSiteUnspecifiedTreatedAsLax" + ) + SAME_PARTY_FROM_CROSS_PARTY_CONTEXT = "SamePartyFromCrossPartyContext" + NAME_VALUE_PAIR_EXCEEDS_MAX_SIZE = "NameValuePairExceedsMaxSize" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CookieBlockedReason: + return cls(json)
+ + + +
+[docs] +class CookieExemptionReason(enum.Enum): + """ + Types of reasons why a cookie should have been blocked by 3PCD but is exempted for the request. + """ + + NONE = "None" + USER_SETTING = "UserSetting" + TPCD_METADATA = "TPCDMetadata" + TPCD_DEPRECATION_TRIAL = "TPCDDeprecationTrial" + TPCD_HEURISTICS = "TPCDHeuristics" + ENTERPRISE_POLICY = "EnterprisePolicy" + STORAGE_ACCESS = "StorageAccess" + TOP_LEVEL_STORAGE_ACCESS = "TopLevelStorageAccess" + CORS_OPT_IN = "CorsOptIn" + SCHEME = "Scheme" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CookieExemptionReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class BlockedSetCookieWithReason: + """ + A cookie which was not stored from a response with the corresponding reason. + """ + + #: The reason(s) this cookie was blocked. + blocked_reasons: typing.List[SetCookieBlockedReason] + + #: The string representing this individual cookie as it would appear in the header. + #: This is not the entire "cookie" or "set-cookie" header which could have multiple cookies. + cookie_line: str + + #: The cookie object which represents the cookie which was not stored. It is optional because + #: sometimes complete cookie information is not available, such as in the case of parsing + #: errors. + cookie: typing.Optional[Cookie] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["blockedReasons"] = [i.to_json() for i in self.blocked_reasons] + json["cookieLine"] = self.cookie_line + if self.cookie is not None: + json["cookie"] = self.cookie.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BlockedSetCookieWithReason: + return cls( + blocked_reasons=[ + SetCookieBlockedReason.from_json(i) for i in json["blockedReasons"] + ], + cookie_line=str(json["cookieLine"]), + cookie=( + Cookie.from_json(json["cookie"]) + if json.get("cookie", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ExemptedSetCookieWithReason: + """ + A cookie should have been blocked by 3PCD but is exempted and stored from a response with the + corresponding reason. A cookie could only have at most one exemption reason. + """ + + #: The reason the cookie was exempted. + exemption_reason: CookieExemptionReason + + #: The string representing this individual cookie as it would appear in the header. + cookie_line: str + + #: The cookie object representing the cookie. + cookie: Cookie + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["exemptionReason"] = self.exemption_reason.to_json() + json["cookieLine"] = self.cookie_line + json["cookie"] = self.cookie.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ExemptedSetCookieWithReason: + return cls( + exemption_reason=CookieExemptionReason.from_json(json["exemptionReason"]), + cookie_line=str(json["cookieLine"]), + cookie=Cookie.from_json(json["cookie"]), + )
+ + + +
+[docs]
+@dataclass
+class AssociatedCookie:
+    """
+    A cookie associated with the request which may or may not be sent with it.
+    Includes the cookie itself and reasons for blocking or exemption.
+    """
+
+    #: The cookie object representing the cookie which was not sent.
+    cookie: Cookie
+
+    #: The reason(s) the cookie was blocked. If empty, the cookie is included.
+    blocked_reasons: typing.List[CookieBlockedReason]
+
+    #: The reason the cookie should have been blocked by 3PCD but is exempted. A cookie could
+    #: only have at most one exemption reason.
+    exemption_reason: typing.Optional[CookieExemptionReason] = None
+
+    def to_json(self) -> T_JSON_DICT:
+        json: T_JSON_DICT = dict()
+        json["cookie"] = self.cookie.to_json()
+        json["blockedReasons"] = [i.to_json() for i in self.blocked_reasons]
+        if self.exemption_reason is not None:
+            json["exemptionReason"] = self.exemption_reason.to_json()
+        return json
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> AssociatedCookie:
+        return cls(
+            cookie=Cookie.from_json(json["cookie"]),
+            blocked_reasons=[
+                CookieBlockedReason.from_json(i) for i in json["blockedReasons"]
+            ],
+            exemption_reason=(
+                CookieExemptionReason.from_json(json["exemptionReason"])
+                if json.get("exemptionReason", None) is not None
+                else None
+            ),
+        )
+ + + +
+[docs] +@dataclass +class CookieParam: + """ + Cookie parameter object + """ + + #: Cookie name. + name: str + + #: Cookie value. + value: str + + #: The request-URI to associate with the setting of the cookie. This value can affect the + #: default domain, path, source port, and source scheme values of the created cookie. + url: typing.Optional[str] = None + + #: Cookie domain. + domain: typing.Optional[str] = None + + #: Cookie path. + path: typing.Optional[str] = None + + #: True if cookie is secure. + secure: typing.Optional[bool] = None + + #: True if cookie is http-only. + http_only: typing.Optional[bool] = None + + #: Cookie SameSite type. + same_site: typing.Optional[CookieSameSite] = None + + #: Cookie expiration date, session cookie if not set + expires: typing.Optional[TimeSinceEpoch] = None + + #: Cookie Priority. + priority: typing.Optional[CookiePriority] = None + + #: True if cookie is SameParty. + same_party: typing.Optional[bool] = None + + #: Cookie source scheme type. + source_scheme: typing.Optional[CookieSourceScheme] = None + + #: Cookie source port. Valid values are {-1, [1, 65535]}, -1 indicates an unspecified port. + #: An unspecified port value allows protocol clients to emulate legacy cookie scope for the port. + #: This is a temporary ability and it will be removed in the future. + source_port: typing.Optional[int] = None + + #: Cookie partition key. If not set, the cookie will be set as not partitioned. + partition_key: typing.Optional[CookiePartitionKey] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + if self.url is not None: + json["url"] = self.url + if self.domain is not None: + json["domain"] = self.domain + if self.path is not None: + json["path"] = self.path + if self.secure is not None: + json["secure"] = self.secure + if self.http_only is not None: + json["httpOnly"] = self.http_only + if self.same_site is not None: + json["sameSite"] = self.same_site.to_json() + if self.expires is not None: + json["expires"] = self.expires.to_json() + if self.priority is not None: + json["priority"] = self.priority.to_json() + if self.same_party is not None: + json["sameParty"] = self.same_party + if self.source_scheme is not None: + json["sourceScheme"] = self.source_scheme.to_json() + if self.source_port is not None: + json["sourcePort"] = self.source_port + if self.partition_key is not None: + json["partitionKey"] = self.partition_key.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CookieParam: + return cls( + name=str(json["name"]), + value=str(json["value"]), + url=str(json["url"]) if json.get("url", None) is not None else None, + domain=( + str(json["domain"]) if json.get("domain", None) is not None else None + ), + path=str(json["path"]) if json.get("path", None) is not None else None, + secure=( + bool(json["secure"]) if json.get("secure", None) is not None else None + ), + http_only=( + bool(json["httpOnly"]) + if json.get("httpOnly", None) is not None + else None + ), + same_site=( + CookieSameSite.from_json(json["sameSite"]) + if json.get("sameSite", None) is not None + else None + ), + expires=( + TimeSinceEpoch.from_json(json["expires"]) + if json.get("expires", None) is not None + else None + ), + priority=( + CookiePriority.from_json(json["priority"]) + if json.get("priority", None) is not None + else None + ), + same_party=( + bool(json["sameParty"]) + if json.get("sameParty", None) is not None + else None + ), + source_scheme=( + 
CookieSourceScheme.from_json(json["sourceScheme"]) + if json.get("sourceScheme", None) is not None + else None + ), + source_port=( + int(json["sourcePort"]) + if json.get("sourcePort", None) is not None + else None + ), + partition_key=( + CookiePartitionKey.from_json(json["partitionKey"]) + if json.get("partitionKey", None) is not None + else None + ), + )
+ + + +
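As a sketch of how ``CookieParam`` is typically built and serialized (the field values are placeholders, and it is assumed that the ``CookieSameSite`` enum defined earlier in this module exposes the usual STRICT/LAX/NONE members):

cookie = CookieParam(
    name="session_id",
    value="abc123",
    domain=".example.com",
    path="/",
    secure=True,
    http_only=True,
    same_site=CookieSameSite.LAX,
)
payload = cookie.to_json()                 # camelCase dict, ready to pass to Network.setCookies
restored = CookieParam.from_json(payload)  # round-trips back to the dataclass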
+[docs] +@dataclass +class AuthChallenge: + """ + Authorization challenge for HTTP status code 401 or 407. + """ + + #: Origin of the challenger. + origin: str + + #: The authentication scheme used, such as basic or digest + scheme: str + + #: The realm of the challenge. May be empty. + realm: str + + #: Source of the authentication challenge. + source: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["origin"] = self.origin + json["scheme"] = self.scheme + json["realm"] = self.realm + if self.source is not None: + json["source"] = self.source + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AuthChallenge: + return cls( + origin=str(json["origin"]), + scheme=str(json["scheme"]), + realm=str(json["realm"]), + source=( + str(json["source"]) if json.get("source", None) is not None else None + ), + )
+ + + +
+[docs]
+@dataclass
+class AuthChallengeResponse:
+    """
+    Response to an AuthChallenge.
+    """
+
+    #: The decision on what to do in response to the authorization challenge. Default means
+    #: deferring to the default behavior of the net stack, which will likely either cancel the
+    #: authentication or display a popup dialog box.
+    response: str
+
+    #: The username to provide, possibly empty. Should only be set if response is
+    #: ProvideCredentials.
+    username: typing.Optional[str] = None
+
+    #: The password to provide, possibly empty. Should only be set if response is
+    #: ProvideCredentials.
+    password: typing.Optional[str] = None
+
+    def to_json(self) -> T_JSON_DICT:
+        json: T_JSON_DICT = dict()
+        json["response"] = self.response
+        if self.username is not None:
+            json["username"] = self.username
+        if self.password is not None:
+            json["password"] = self.password
+        return json
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> AuthChallengeResponse:
+        return cls(
+            response=str(json["response"]),
+            username=(
+                str(json["username"])
+                if json.get("username", None) is not None
+                else None
+            ),
+            password=(
+                str(json["password"])
+                if json.get("password", None) is not None
+                else None
+            ),
+        )
+ + + +
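For example, answering a challenge with credentials; "ProvideCredentials" is one of the protocol's allowed ``response`` values, and the username/password shown are placeholders:

answer = AuthChallengeResponse(
    response="ProvideCredentials",  # or "Default" / "CancelAuth"
    username="user",
    password="secret",
)
# answer.to_json() -> {'response': 'ProvideCredentials', 'username': 'user', 'password': 'secret'}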
+[docs] +class InterceptionStage(enum.Enum): + """ + Stages of the interception to begin intercepting. Request will intercept before the request is + sent. Response will intercept after the response is received. + """ + + REQUEST = "Request" + HEADERS_RECEIVED = "HeadersReceived" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> InterceptionStage: + return cls(json)
+ + + +
+[docs] +@dataclass +class RequestPattern: + """ + Request pattern for interception. + """ + + #: Wildcards (``'*'`` -> zero or more, ``'?'`` -> exactly one) are allowed. Escape character is + #: backslash. Omitting is equivalent to ``"*"``. + url_pattern: typing.Optional[str] = None + + #: If set, only requests for matching resource types will be intercepted. + resource_type: typing.Optional[ResourceType] = None + + #: Stage at which to begin intercepting requests. Default is Request. + interception_stage: typing.Optional[InterceptionStage] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.url_pattern is not None: + json["urlPattern"] = self.url_pattern + if self.resource_type is not None: + json["resourceType"] = self.resource_type.to_json() + if self.interception_stage is not None: + json["interceptionStage"] = self.interception_stage.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RequestPattern: + return cls( + url_pattern=( + str(json["urlPattern"]) + if json.get("urlPattern", None) is not None + else None + ), + resource_type=( + ResourceType.from_json(json["resourceType"]) + if json.get("resourceType", None) is not None + else None + ), + interception_stage=( + InterceptionStage.from_json(json["interceptionStage"]) + if json.get("interceptionStage", None) is not None + else None + ), + )
+ + + +
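A minimal construction sketch, assuming the ``ResourceType`` enum defined earlier in this module exposes an ``XHR`` member:

pattern = RequestPattern(
    url_pattern="*://example.com/*",
    resource_type=ResourceType.XHR,
    interception_stage=InterceptionStage.HEADERS_RECEIVED,
)
# pattern.to_json() -> {'urlPattern': '*://example.com/*',
#                       'resourceType': 'XHR',
#                       'interceptionStage': 'HeadersReceived'}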
+[docs] +@dataclass +class SignedExchangeSignature: + """ + Information about a signed exchange signature. + https://wicg.github.io/webpackage/draft-yasskin-httpbis-origin-signed-exchanges-impl.html#rfc.section.3.1 + """ + + #: Signed exchange signature label. + label: str + + #: The hex string of signed exchange signature. + signature: str + + #: Signed exchange signature integrity. + integrity: str + + #: Signed exchange signature validity Url. + validity_url: str + + #: Signed exchange signature date. + date: int + + #: Signed exchange signature expires. + expires: int + + #: Signed exchange signature cert Url. + cert_url: typing.Optional[str] = None + + #: The hex string of signed exchange signature cert sha256. + cert_sha256: typing.Optional[str] = None + + #: The encoded certificates. + certificates: typing.Optional[typing.List[str]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["label"] = self.label + json["signature"] = self.signature + json["integrity"] = self.integrity + json["validityUrl"] = self.validity_url + json["date"] = self.date + json["expires"] = self.expires + if self.cert_url is not None: + json["certUrl"] = self.cert_url + if self.cert_sha256 is not None: + json["certSha256"] = self.cert_sha256 + if self.certificates is not None: + json["certificates"] = [i for i in self.certificates] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SignedExchangeSignature: + return cls( + label=str(json["label"]), + signature=str(json["signature"]), + integrity=str(json["integrity"]), + validity_url=str(json["validityUrl"]), + date=int(json["date"]), + expires=int(json["expires"]), + cert_url=( + str(json["certUrl"]) if json.get("certUrl", None) is not None else None + ), + cert_sha256=( + str(json["certSha256"]) + if json.get("certSha256", None) is not None + else None + ), + certificates=( + [str(i) for i in json["certificates"]] + if json.get("certificates", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class SignedExchangeHeader: + """ + Information about a signed exchange header. + https://wicg.github.io/webpackage/draft-yasskin-httpbis-origin-signed-exchanges-impl.html#cbor-representation + """ + + #: Signed exchange request URL. + request_url: str + + #: Signed exchange response code. + response_code: int + + #: Signed exchange response headers. + response_headers: Headers + + #: Signed exchange response signature. + signatures: typing.List[SignedExchangeSignature] + + #: Signed exchange header integrity hash in the form of ``sha256-<base64-hash-value>``. + header_integrity: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["requestUrl"] = self.request_url + json["responseCode"] = self.response_code + json["responseHeaders"] = self.response_headers.to_json() + json["signatures"] = [i.to_json() for i in self.signatures] + json["headerIntegrity"] = self.header_integrity + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SignedExchangeHeader: + return cls( + request_url=str(json["requestUrl"]), + response_code=int(json["responseCode"]), + response_headers=Headers.from_json(json["responseHeaders"]), + signatures=[ + SignedExchangeSignature.from_json(i) for i in json["signatures"] + ], + header_integrity=str(json["headerIntegrity"]), + )
+ + + +
+[docs] +class SignedExchangeErrorField(enum.Enum): + """ + Field type for a signed exchange related error. + """ + + SIGNATURE_SIG = "signatureSig" + SIGNATURE_INTEGRITY = "signatureIntegrity" + SIGNATURE_CERT_URL = "signatureCertUrl" + SIGNATURE_CERT_SHA256 = "signatureCertSha256" + SIGNATURE_VALIDITY_URL = "signatureValidityUrl" + SIGNATURE_TIMESTAMPS = "signatureTimestamps" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SignedExchangeErrorField: + return cls(json)
+ + + +
+[docs] +@dataclass +class SignedExchangeError: + """ + Information about a signed exchange response. + """ + + #: Error message. + message: str + + #: The index of the signature which caused the error. + signature_index: typing.Optional[int] = None + + #: The field which caused the error. + error_field: typing.Optional[SignedExchangeErrorField] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["message"] = self.message + if self.signature_index is not None: + json["signatureIndex"] = self.signature_index + if self.error_field is not None: + json["errorField"] = self.error_field.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SignedExchangeError: + return cls( + message=str(json["message"]), + signature_index=( + int(json["signatureIndex"]) + if json.get("signatureIndex", None) is not None + else None + ), + error_field=( + SignedExchangeErrorField.from_json(json["errorField"]) + if json.get("errorField", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class SignedExchangeInfo: + """ + Information about a signed exchange response. + """ + + #: The outer response of signed HTTP exchange which was received from network. + outer_response: Response + + #: Information about the signed exchange header. + header: typing.Optional[SignedExchangeHeader] = None + + #: Security details for the signed exchange header. + security_details: typing.Optional[SecurityDetails] = None + + #: Errors occurred while handling the signed exchange. + errors: typing.Optional[typing.List[SignedExchangeError]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["outerResponse"] = self.outer_response.to_json() + if self.header is not None: + json["header"] = self.header.to_json() + if self.security_details is not None: + json["securityDetails"] = self.security_details.to_json() + if self.errors is not None: + json["errors"] = [i.to_json() for i in self.errors] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SignedExchangeInfo: + return cls( + outer_response=Response.from_json(json["outerResponse"]), + header=( + SignedExchangeHeader.from_json(json["header"]) + if json.get("header", None) is not None + else None + ), + security_details=( + SecurityDetails.from_json(json["securityDetails"]) + if json.get("securityDetails", None) is not None + else None + ), + errors=( + [SignedExchangeError.from_json(i) for i in json["errors"]] + if json.get("errors", None) is not None + else None + ), + )
+ + + +
+[docs] +class ContentEncoding(enum.Enum): + """ + List of content encodings supported by the backend. + """ + + DEFLATE = "deflate" + GZIP = "gzip" + BR = "br" + ZSTD = "zstd" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ContentEncoding: + return cls(json)
+ + + +
+[docs] +class PrivateNetworkRequestPolicy(enum.Enum): + ALLOW = "Allow" + BLOCK_FROM_INSECURE_TO_MORE_PRIVATE = "BlockFromInsecureToMorePrivate" + WARN_FROM_INSECURE_TO_MORE_PRIVATE = "WarnFromInsecureToMorePrivate" + PREFLIGHT_BLOCK = "PreflightBlock" + PREFLIGHT_WARN = "PreflightWarn" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PrivateNetworkRequestPolicy: + return cls(json)
+ + + +
+[docs] +class IPAddressSpace(enum.Enum): + LOCAL = "Local" + PRIVATE = "Private" + PUBLIC = "Public" + UNKNOWN = "Unknown" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> IPAddressSpace: + return cls(json)
+ + + +
+[docs]
+@dataclass
+class ConnectTiming:
+    #: Timing's requestTime is a baseline in seconds, while the other numbers are ticks in
+    #: milliseconds relative to this requestTime. Matches ResourceTiming's requestTime for
+    #: the same request (but not for redirected requests).
+    request_time: float
+
+    def to_json(self) -> T_JSON_DICT:
+        json: T_JSON_DICT = dict()
+        json["requestTime"] = self.request_time
+        return json
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> ConnectTiming:
+        return cls(
+            request_time=float(json["requestTime"]),
+        )
+ + + +
+[docs] +@dataclass +class ClientSecurityState: + initiator_is_secure_context: bool + + initiator_ip_address_space: IPAddressSpace + + private_network_request_policy: PrivateNetworkRequestPolicy + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["initiatorIsSecureContext"] = self.initiator_is_secure_context + json["initiatorIPAddressSpace"] = self.initiator_ip_address_space.to_json() + json["privateNetworkRequestPolicy"] = ( + self.private_network_request_policy.to_json() + ) + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ClientSecurityState: + return cls( + initiator_is_secure_context=bool(json["initiatorIsSecureContext"]), + initiator_ip_address_space=IPAddressSpace.from_json( + json["initiatorIPAddressSpace"] + ), + private_network_request_policy=PrivateNetworkRequestPolicy.from_json( + json["privateNetworkRequestPolicy"] + ), + )
+ + + +
+[docs] +class CrossOriginOpenerPolicyValue(enum.Enum): + SAME_ORIGIN = "SameOrigin" + SAME_ORIGIN_ALLOW_POPUPS = "SameOriginAllowPopups" + RESTRICT_PROPERTIES = "RestrictProperties" + UNSAFE_NONE = "UnsafeNone" + SAME_ORIGIN_PLUS_COEP = "SameOriginPlusCoep" + RESTRICT_PROPERTIES_PLUS_COEP = "RestrictPropertiesPlusCoep" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CrossOriginOpenerPolicyValue: + return cls(json)
+ + + +
+[docs] +@dataclass +class CrossOriginOpenerPolicyStatus: + value: CrossOriginOpenerPolicyValue + + report_only_value: CrossOriginOpenerPolicyValue + + reporting_endpoint: typing.Optional[str] = None + + report_only_reporting_endpoint: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["value"] = self.value.to_json() + json["reportOnlyValue"] = self.report_only_value.to_json() + if self.reporting_endpoint is not None: + json["reportingEndpoint"] = self.reporting_endpoint + if self.report_only_reporting_endpoint is not None: + json["reportOnlyReportingEndpoint"] = self.report_only_reporting_endpoint + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CrossOriginOpenerPolicyStatus: + return cls( + value=CrossOriginOpenerPolicyValue.from_json(json["value"]), + report_only_value=CrossOriginOpenerPolicyValue.from_json( + json["reportOnlyValue"] + ), + reporting_endpoint=( + str(json["reportingEndpoint"]) + if json.get("reportingEndpoint", None) is not None + else None + ), + report_only_reporting_endpoint=( + str(json["reportOnlyReportingEndpoint"]) + if json.get("reportOnlyReportingEndpoint", None) is not None + else None + ), + )
+ + + +
+[docs] +class CrossOriginEmbedderPolicyValue(enum.Enum): + NONE = "None" + CREDENTIALLESS = "Credentialless" + REQUIRE_CORP = "RequireCorp" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CrossOriginEmbedderPolicyValue: + return cls(json)
+ + + +
+[docs] +@dataclass +class CrossOriginEmbedderPolicyStatus: + value: CrossOriginEmbedderPolicyValue + + report_only_value: CrossOriginEmbedderPolicyValue + + reporting_endpoint: typing.Optional[str] = None + + report_only_reporting_endpoint: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["value"] = self.value.to_json() + json["reportOnlyValue"] = self.report_only_value.to_json() + if self.reporting_endpoint is not None: + json["reportingEndpoint"] = self.reporting_endpoint + if self.report_only_reporting_endpoint is not None: + json["reportOnlyReportingEndpoint"] = self.report_only_reporting_endpoint + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CrossOriginEmbedderPolicyStatus: + return cls( + value=CrossOriginEmbedderPolicyValue.from_json(json["value"]), + report_only_value=CrossOriginEmbedderPolicyValue.from_json( + json["reportOnlyValue"] + ), + reporting_endpoint=( + str(json["reportingEndpoint"]) + if json.get("reportingEndpoint", None) is not None + else None + ), + report_only_reporting_endpoint=( + str(json["reportOnlyReportingEndpoint"]) + if json.get("reportOnlyReportingEndpoint", None) is not None + else None + ), + )
+ + + +
+[docs] +class ContentSecurityPolicySource(enum.Enum): + HTTP = "HTTP" + META = "Meta" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ContentSecurityPolicySource: + return cls(json)
+ + + +
+[docs] +@dataclass +class ContentSecurityPolicyStatus: + effective_directives: str + + is_enforced: bool + + source: ContentSecurityPolicySource + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["effectiveDirectives"] = self.effective_directives + json["isEnforced"] = self.is_enforced + json["source"] = self.source.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ContentSecurityPolicyStatus: + return cls( + effective_directives=str(json["effectiveDirectives"]), + is_enforced=bool(json["isEnforced"]), + source=ContentSecurityPolicySource.from_json(json["source"]), + )
+ + + +
+[docs] +@dataclass +class SecurityIsolationStatus: + coop: typing.Optional[CrossOriginOpenerPolicyStatus] = None + + coep: typing.Optional[CrossOriginEmbedderPolicyStatus] = None + + csp: typing.Optional[typing.List[ContentSecurityPolicyStatus]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.coop is not None: + json["coop"] = self.coop.to_json() + if self.coep is not None: + json["coep"] = self.coep.to_json() + if self.csp is not None: + json["csp"] = [i.to_json() for i in self.csp] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SecurityIsolationStatus: + return cls( + coop=( + CrossOriginOpenerPolicyStatus.from_json(json["coop"]) + if json.get("coop", None) is not None + else None + ), + coep=( + CrossOriginEmbedderPolicyStatus.from_json(json["coep"]) + if json.get("coep", None) is not None + else None + ), + csp=( + [ContentSecurityPolicyStatus.from_json(i) for i in json["csp"]] + if json.get("csp", None) is not None + else None + ), + )
+ + + +
+[docs] +class ReportStatus(enum.Enum): + """ + The status of a Reporting API report. + """ + + QUEUED = "Queued" + PENDING = "Pending" + MARKED_FOR_REMOVAL = "MarkedForRemoval" + SUCCESS = "Success" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ReportStatus: + return cls(json)
+ + + +
+[docs] +class ReportId(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> ReportId: + return cls(json) + + def __repr__(self): + return "ReportId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class ReportingApiReport: + """ + An object representing a report generated by the Reporting API. + """ + + id_: ReportId + + #: The URL of the document that triggered the report. + initiator_url: str + + #: The name of the endpoint group that should be used to deliver the report. + destination: str + + #: The type of the report (specifies the set of data that is contained in the report body). + type_: str + + #: When the report was generated. + timestamp: TimeSinceEpoch + + #: How many uploads deep the related request was. + depth: int + + #: The number of delivery attempts made so far, not including an active attempt. + completed_attempts: int + + body: dict + + status: ReportStatus + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_.to_json() + json["initiatorUrl"] = self.initiator_url + json["destination"] = self.destination + json["type"] = self.type_ + json["timestamp"] = self.timestamp.to_json() + json["depth"] = self.depth + json["completedAttempts"] = self.completed_attempts + json["body"] = self.body + json["status"] = self.status.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ReportingApiReport: + return cls( + id_=ReportId.from_json(json["id"]), + initiator_url=str(json["initiatorUrl"]), + destination=str(json["destination"]), + type_=str(json["type"]), + timestamp=TimeSinceEpoch.from_json(json["timestamp"]), + depth=int(json["depth"]), + completed_attempts=int(json["completedAttempts"]), + body=dict(json["body"]), + status=ReportStatus.from_json(json["status"]), + )
+ + + +
+[docs] +@dataclass +class ReportingApiEndpoint: + #: The URL of the endpoint to which reports may be delivered. + url: str + + #: Name of the endpoint group. + group_name: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["groupName"] = self.group_name + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ReportingApiEndpoint: + return cls( + url=str(json["url"]), + group_name=str(json["groupName"]), + )
+ + + +
+[docs] +@dataclass +class LoadNetworkResourcePageResult: + """ + An object providing the result of a network resource load. + """ + + success: bool + + #: Optional values used for error reporting. + net_error: typing.Optional[float] = None + + net_error_name: typing.Optional[str] = None + + http_status_code: typing.Optional[float] = None + + #: If successful, one of the following two fields holds the result. + stream: typing.Optional[io.StreamHandle] = None + + #: Response headers. + headers: typing.Optional[Headers] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["success"] = self.success + if self.net_error is not None: + json["netError"] = self.net_error + if self.net_error_name is not None: + json["netErrorName"] = self.net_error_name + if self.http_status_code is not None: + json["httpStatusCode"] = self.http_status_code + if self.stream is not None: + json["stream"] = self.stream.to_json() + if self.headers is not None: + json["headers"] = self.headers.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LoadNetworkResourcePageResult: + return cls( + success=bool(json["success"]), + net_error=( + float(json["netError"]) + if json.get("netError", None) is not None + else None + ), + net_error_name=( + str(json["netErrorName"]) + if json.get("netErrorName", None) is not None + else None + ), + http_status_code=( + float(json["httpStatusCode"]) + if json.get("httpStatusCode", None) is not None + else None + ), + stream=( + io.StreamHandle.from_json(json["stream"]) + if json.get("stream", None) is not None + else None + ), + headers=( + Headers.from_json(json["headers"]) + if json.get("headers", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class LoadNetworkResourceOptions: + """ + An options object that may be extended later to better support CORS, + CORB and streaming. + """ + + disable_cache: bool + + include_credentials: bool + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["disableCache"] = self.disable_cache + json["includeCredentials"] = self.include_credentials + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LoadNetworkResourceOptions: + return cls( + disable_cache=bool(json["disableCache"]), + include_credentials=bool(json["includeCredentials"]), + )
+ + + +
+[docs] +def set_accepted_encodings( + encodings: typing.List[ContentEncoding], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets a list of content encodings that will be accepted. Empty list means no encoding is accepted. + + **EXPERIMENTAL** + + :param encodings: List of accepted content encodings. + """ + params: T_JSON_DICT = dict() + params["encodings"] = [i.to_json() for i in encodings] + cmd_dict: T_JSON_DICT = { + "method": "Network.setAcceptedEncodings", + "params": params, + } + json = yield cmd_dict
+ + + +
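The command functions from here on are generators that yield a CDP message dict and receive the reply; they are meant to be driven by a session object rather than called directly. A hedged sketch of driving them through nodriver's ``tab.send()`` plumbing (the exact driver is up to the caller):

import nodriver as uc
from nodriver import cdp

async def gzip_only(tab: uc.Tab) -> None:
    # Only accept gzip-compressed responses for this session...
    await tab.send(cdp.network.set_accepted_encodings([cdp.network.ContentEncoding.GZIP]))
    # ...and later restore the browser default (see clear_accepted_encodings_override below).
    await tab.send(cdp.network.clear_accepted_encodings_override())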
+[docs] +def clear_accepted_encodings_override() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, None] +): + """ + Clears accepted encodings set by setAcceptedEncodings + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Network.clearAcceptedEncodingsOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def can_clear_browser_cache() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, bool]: + """ + Tells whether clearing browser cache is supported. + + .. deprecated:: 1.3 + + :returns: True if browser cache can be cleared. + """ + cmd_dict: T_JSON_DICT = { + "method": "Network.canClearBrowserCache", + } + json = yield cmd_dict + return bool(json["result"])
+ + + +
+[docs] +@deprecated(version="1.3") +def can_clear_browser_cookies() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, bool]: + """ + Tells whether clearing browser cookies is supported. + + .. deprecated:: 1.3 + + :returns: True if browser cookies can be cleared. + """ + cmd_dict: T_JSON_DICT = { + "method": "Network.canClearBrowserCookies", + } + json = yield cmd_dict + return bool(json["result"])
+ + + +
+[docs] +@deprecated(version="1.3") +def can_emulate_network_conditions() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, bool] +): + """ + Tells whether emulation of network conditions is supported. + + .. deprecated:: 1.3 + + :returns: True if emulation of network conditions is supported. + """ + cmd_dict: T_JSON_DICT = { + "method": "Network.canEmulateNetworkConditions", + } + json = yield cmd_dict + return bool(json["result"])
+ + + +
+[docs] +def clear_browser_cache() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears browser cache. + """ + cmd_dict: T_JSON_DICT = { + "method": "Network.clearBrowserCache", + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_browser_cookies() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears browser cookies. + """ + cmd_dict: T_JSON_DICT = { + "method": "Network.clearBrowserCookies", + } + json = yield cmd_dict
+ + + +
+[docs]
+@deprecated(version="1.3")
+def continue_intercepted_request(
+    interception_id: InterceptionId,
+    error_reason: typing.Optional[ErrorReason] = None,
+    raw_response: typing.Optional[str] = None,
+    url: typing.Optional[str] = None,
+    method: typing.Optional[str] = None,
+    post_data: typing.Optional[str] = None,
+    headers: typing.Optional[Headers] = None,
+    auth_challenge_response: typing.Optional[AuthChallengeResponse] = None,
+) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
+    """
+    Response to Network.requestIntercepted which either modifies the request to continue with any
+    modifications, or blocks it, or completes it with the provided response bytes. If a network
+    fetch occurs as a result which encounters a redirect an additional Network.requestIntercepted
+    event will be sent with the same InterceptionId.
+    Deprecated, use Fetch.continueRequest, Fetch.fulfillRequest and Fetch.failRequest instead.
+
+    .. deprecated:: 1.3
+
+    **EXPERIMENTAL**
+
+    :param interception_id:
+    :param error_reason: *(Optional)* If set this causes the request to fail with the given reason. Passing ``Aborted`` for requests marked with ``isNavigationRequest`` also cancels the navigation. Must not be set in response to an authChallenge.
+    :param raw_response: *(Optional)* If set the request completes with the provided base64 encoded raw response, including HTTP status line, headers, etc. Must not be set in response to an authChallenge. (Encoded as a base64 string when passed over JSON)
+    :param url: *(Optional)* If set the request url will be modified in a way that's not observable by page. Must not be set in response to an authChallenge.
+    :param method: *(Optional)* If set this allows the request method to be overridden. Must not be set in response to an authChallenge.
+    :param post_data: *(Optional)* If set this allows postData to be set. Must not be set in response to an authChallenge.
+    :param headers: *(Optional)* If set this allows the request headers to be changed. Must not be set in response to an authChallenge.
+    :param auth_challenge_response: *(Optional)* Response to a requestIntercepted with an authChallenge. Must not be set otherwise.
+    """
+    params: T_JSON_DICT = dict()
+    params["interceptionId"] = interception_id.to_json()
+    if error_reason is not None:
+        params["errorReason"] = error_reason.to_json()
+    if raw_response is not None:
+        params["rawResponse"] = raw_response
+    if url is not None:
+        params["url"] = url
+    if method is not None:
+        params["method"] = method
+    if post_data is not None:
+        params["postData"] = post_data
+    if headers is not None:
+        params["headers"] = headers.to_json()
+    if auth_challenge_response is not None:
+        params["authChallengeResponse"] = auth_challenge_response.to_json()
+    cmd_dict: T_JSON_DICT = {
+        "method": "Network.continueInterceptedRequest",
+        "params": params,
+    }
+    json = yield cmd_dict
+ + + +
+[docs]
+def delete_cookies(
+    name: str,
+    url: typing.Optional[str] = None,
+    domain: typing.Optional[str] = None,
+    path: typing.Optional[str] = None,
+    partition_key: typing.Optional[CookiePartitionKey] = None,
+) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
+    """
+    Deletes browser cookies with matching name and url or domain/path/partitionKey pair.
+
+    :param name: Name of the cookies to remove.
+    :param url: *(Optional)* If specified, deletes all the cookies with the given name where domain and path match provided URL.
+    :param domain: *(Optional)* If specified, deletes only cookies with the exact domain.
+    :param path: *(Optional)* If specified, deletes only cookies with the exact path.
+    :param partition_key: **(EXPERIMENTAL)** *(Optional)* If specified, deletes only cookies with the given name and partitionKey where all partition key attributes match the cookie partition key attribute.
+    """
+    params: T_JSON_DICT = dict()
+    params["name"] = name
+    if url is not None:
+        params["url"] = url
+    if domain is not None:
+        params["domain"] = domain
+    if path is not None:
+        params["path"] = path
+    if partition_key is not None:
+        params["partitionKey"] = partition_key.to_json()
+    cmd_dict: T_JSON_DICT = {
+        "method": "Network.deleteCookies",
+        "params": params,
+    }
+    json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables network tracking, prevents network events from being sent to the client. + """ + cmd_dict: T_JSON_DICT = { + "method": "Network.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def emulate_network_conditions( + offline: bool, + latency: float, + download_throughput: float, + upload_throughput: float, + connection_type: typing.Optional[ConnectionType] = None, + packet_loss: typing.Optional[float] = None, + packet_queue_length: typing.Optional[int] = None, + packet_reordering: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Activates emulation of network conditions. + + :param offline: True to emulate internet disconnection. + :param latency: Minimum latency from request sent to response headers received (ms). + :param download_throughput: Maximal aggregated download throughput (bytes/sec). -1 disables download throttling. + :param upload_throughput: Maximal aggregated upload throughput (bytes/sec). -1 disables upload throttling. + :param connection_type: *(Optional)* Connection type if known. + :param packet_loss: **(EXPERIMENTAL)** *(Optional)* WebRTC packet loss (percent, 0-100). 0 disables packet loss emulation, 100 drops all the packets. + :param packet_queue_length: **(EXPERIMENTAL)** *(Optional)* WebRTC packet queue length (packet). 0 removes any queue length limitations. + :param packet_reordering: **(EXPERIMENTAL)** *(Optional)* WebRTC packetReordering feature. + """ + params: T_JSON_DICT = dict() + params["offline"] = offline + params["latency"] = latency + params["downloadThroughput"] = download_throughput + params["uploadThroughput"] = upload_throughput + if connection_type is not None: + params["connectionType"] = connection_type.to_json() + if packet_loss is not None: + params["packetLoss"] = packet_loss + if packet_queue_length is not None: + params["packetQueueLength"] = packet_queue_length + if packet_reordering is not None: + params["packetReordering"] = packet_reordering + cmd_dict: T_JSON_DICT = { + "method": "Network.emulateNetworkConditions", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def enable( + max_total_buffer_size: typing.Optional[int] = None, + max_resource_buffer_size: typing.Optional[int] = None, + max_post_data_size: typing.Optional[int] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables network tracking, network events will now be delivered to the client. + + :param max_total_buffer_size: **(EXPERIMENTAL)** *(Optional)* Buffer size in bytes to use when preserving network payloads (XHRs, etc). + :param max_resource_buffer_size: **(EXPERIMENTAL)** *(Optional)* Per-resource buffer size in bytes to use when preserving network payloads (XHRs, etc). + :param max_post_data_size: *(Optional)* Longest post body size (in bytes) that would be included in requestWillBeSent notification + """ + params: T_JSON_DICT = dict() + if max_total_buffer_size is not None: + params["maxTotalBufferSize"] = max_total_buffer_size + if max_resource_buffer_size is not None: + params["maxResourceBufferSize"] = max_resource_buffer_size + if max_post_data_size is not None: + params["maxPostDataSize"] = max_post_data_size + cmd_dict: T_JSON_DICT = { + "method": "Network.enable", + "params": params, + } + json = yield cmd_dict
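Network events are only delivered after enable(); a short sketch assuming `tab` as above, with an arbitrary post-data buffer size.

from nodriver import cdp

async def start_network_tracking(tab):
    # Enable the Network domain; keep up to 64 KiB of request post data per request.
    await tab.send(cdp.network.enable(max_post_data_size=64 * 1024))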
+ + + +
+[docs] +@deprecated(version="1.3") +def get_all_cookies() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Cookie]] +): + """ + Returns all browser cookies. Depending on the backend support, will return detailed cookie + information in the ``cookies`` field. + Deprecated. Use Storage.getCookies instead. + + .. deprecated:: 1.3 + + :returns: Array of cookie objects. + """ + cmd_dict: T_JSON_DICT = { + "method": "Network.getAllCookies", + } + json = yield cmd_dict + return [Cookie.from_json(i) for i in json["cookies"]]
+ + + +
+[docs] +def get_certificate( + origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]]: + """ + Returns the DER-encoded certificate. + + **EXPERIMENTAL** + + :param origin: Origin to get certificate for. + :returns: + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + cmd_dict: T_JSON_DICT = { + "method": "Network.getCertificate", + "params": params, + } + json = yield cmd_dict + return [str(i) for i in json["tableNames"]]
+ + + +
+[docs] +def get_cookies( + urls: typing.Optional[typing.List[str]] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Cookie]]: + """ + Returns all browser cookies for the current URL. Depending on the backend support, will return + detailed cookie information in the ``cookies`` field. + + :param urls: *(Optional)* The list of URLs for which applicable cookies will be fetched. If not specified, it's assumed to be set to the list containing the URLs of the page and all of its subframes. + :returns: Array of cookie objects. + """ + params: T_JSON_DICT = dict() + if urls is not None: + params["urls"] = [i for i in urls] + cmd_dict: T_JSON_DICT = { + "method": "Network.getCookies", + "params": params, + } + json = yield cmd_dict + return [Cookie.from_json(i) for i in json["cookies"]]
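Reading cookies back with get_cookies(), assuming `tab` as above. Passing no urls returns the cookies applicable to the page and its subframes.

from nodriver import cdp

async def dump_cookies(tab):
    cookies = await tab.send(cdp.network.get_cookies())
    for cookie in cookies:
        print(cookie.name, cookie.value, cookie.domain)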
+ + + +
+[docs] +def get_response_body( + request_id: RequestId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[str, bool]]: + """ + Returns content served for the given request. + + :param request_id: Identifier of the network request to get content for. + :returns: A tuple with the following items: + + 0. **body** - Response body. + 1. **base64Encoded** - True, if content was sent as base64. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.getResponseBody", + "params": params, + } + json = yield cmd_dict + return (str(json["body"]), bool(json["base64Encoded"]))
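Fetching a response body with get_response_body(), assuming a RequestId captured earlier (for example from a ResponseReceived event, see the handler sketch further down); the base64Encoded flag tells you whether the body needs decoding.

import base64
from nodriver import cdp

async def read_body(tab, request_id: cdp.network.RequestId) -> bytes:
    body, is_base64 = await tab.send(cdp.network.get_response_body(request_id))
    # Decode when the protocol delivered the body base64-encoded.
    return base64.b64decode(body) if is_base64 else body.encode()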
+ + + +
+[docs] +def get_request_post_data( + request_id: RequestId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Returns post data sent with the request. Returns an error when no data was sent with the request. + + :param request_id: Identifier of the network request to get content for. + :returns: Request body string, omitting files from multipart requests + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.getRequestPostData", + "params": params, + } + json = yield cmd_dict + return str(json["postData"])
+ + + +
+[docs] +def get_response_body_for_interception( + interception_id: InterceptionId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[str, bool]]: + """ + Returns content served for the given currently intercepted request. + + **EXPERIMENTAL** + + :param interception_id: Identifier for the intercepted request to get body for. + :returns: A tuple with the following items: + + 0. **body** - Response body. + 1. **base64Encoded** - True, if content was sent as base64. + """ + params: T_JSON_DICT = dict() + params["interceptionId"] = interception_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.getResponseBodyForInterception", + "params": params, + } + json = yield cmd_dict + return (str(json["body"]), bool(json["base64Encoded"]))
+ + + +
+[docs] +def take_response_body_for_interception_as_stream( + interception_id: InterceptionId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, io.StreamHandle]: + """ + Returns a handle to the stream representing the response body. Note that after this command, + the intercepted request can't be continued as is -- you either need to cancel it or to provide + the response body. The stream only supports sequential read, IO.read will fail if the position + is specified. + + **EXPERIMENTAL** + + :param interception_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["interceptionId"] = interception_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.takeResponseBodyForInterceptionAsStream", + "params": params, + } + json = yield cmd_dict + return io.StreamHandle.from_json(json["stream"])
+ + + +
+[docs] +def replay_xhr( + request_id: RequestId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + This method sends a new XMLHttpRequest which is identical to the original one. The following + parameters should be identical: method, url, async, request body, extra headers, withCredentials + attribute, user, password. + + **EXPERIMENTAL** + + :param request_id: Identifier of XHR to replay. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.replayXHR", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def search_in_response_body( + request_id: RequestId, + query: str, + case_sensitive: typing.Optional[bool] = None, + is_regex: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[debugger.SearchMatch]]: + """ + Searches for given string in response content. + + **EXPERIMENTAL** + + :param request_id: Identifier of the network response to search. + :param query: String to search for. + :param case_sensitive: *(Optional)* If true, search is case sensitive. + :param is_regex: *(Optional)* If true, treats string parameter as regex. + :returns: List of search matches. + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + params["query"] = query + if case_sensitive is not None: + params["caseSensitive"] = case_sensitive + if is_regex is not None: + params["isRegex"] = is_regex + cmd_dict: T_JSON_DICT = { + "method": "Network.searchInResponseBody", + "params": params, + } + json = yield cmd_dict + return [debugger.SearchMatch.from_json(i) for i in json["result"]]
+ + + +
+[docs] +def set_blocked_ur_ls( + urls: typing.List[str], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Blocks URLs from loading. + + **EXPERIMENTAL** + + :param urls: URL patterns to block. Wildcards ('*') are allowed. + """ + params: T_JSON_DICT = dict() + params["urls"] = [i for i in urls] + cmd_dict: T_JSON_DICT = { + "method": "Network.setBlockedURLs", + "params": params, + } + json = yield cmd_dict
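Blocking requests by URL pattern; note the generated snake_case name set_blocked_ur_ls for Network.setBlockedURLs. The patterns below are placeholders, assuming `tab` as above.

from nodriver import cdp

async def block_trackers(tab):
    # Wildcard patterns; any matching request will fail to load.
    await tab.send(cdp.network.set_blocked_ur_ls([
        "*://*.doubleclick.net/*",
        "*.gif",
    ]))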
+ + + +
+[docs] +def set_bypass_service_worker( + bypass: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Toggles ignoring of service worker for each request. + + :param bypass: Bypass service worker and load from network. + """ + params: T_JSON_DICT = dict() + params["bypass"] = bypass + cmd_dict: T_JSON_DICT = { + "method": "Network.setBypassServiceWorker", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_cache_disabled( + cache_disabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Toggles ignoring cache for each request. If ``true``, cache will not be used. + + :param cache_disabled: Cache disabled state. + """ + params: T_JSON_DICT = dict() + params["cacheDisabled"] = cache_disabled + cmd_dict: T_JSON_DICT = { + "method": "Network.setCacheDisabled", + "params": params, + } + json = yield cmd_dict
+ + + + + + + +
+[docs] +def set_cookies( + cookies: typing.List[CookieParam], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets given cookies. + + :param cookies: Cookies to be set. + """ + params: T_JSON_DICT = dict() + params["cookies"] = [i.to_json() for i in cookies] + cmd_dict: T_JSON_DICT = { + "method": "Network.setCookies", + "params": params, + } + json = yield cmd_dict
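Setting cookies with CookieParam objects, assuming `tab` as above; the name, value and url are placeholders. When a url is given, the domain and path are derived from it.

from nodriver import cdp

async def seed_cookies(tab):
    await tab.send(cdp.network.set_cookies([
        cdp.network.CookieParam(
            name="session_id",
            value="abc123",
            url="https://example.com",  # placeholder
        ),
    ]))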
+ + + +
+[docs] +def set_extra_http_headers( + headers: Headers, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Specifies whether to always send extra HTTP headers with the requests from this page. + + :param headers: Map with extra HTTP headers. + """ + params: T_JSON_DICT = dict() + params["headers"] = headers.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.setExtraHTTPHeaders", + "params": params, + } + json = yield cmd_dict
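Attaching extra headers to every request from the page; Headers is the generated dict-like wrapper, and the header name and value here are placeholders.

from nodriver import cdp

async def add_headers(tab):
    await tab.send(cdp.network.set_extra_http_headers(
        cdp.network.Headers({"x-requested-with": "nodriver-example"})
    ))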
+ + + +
+[docs] +def set_attach_debug_stack( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Specifies whether to attach a page script stack id in requests + + **EXPERIMENTAL** + + :param enabled: Whether to attach a page script stack for debugging purpose. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Network.setAttachDebugStack", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_request_interception( + patterns: typing.List[RequestPattern], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets the requests to intercept that match the provided patterns and optionally resource types. + Deprecated, please use Fetch.enable instead. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param patterns: Requests matching any of these patterns will be forwarded and wait for the corresponding continueInterceptedRequest call. + """ + params: T_JSON_DICT = dict() + params["patterns"] = [i.to_json() for i in patterns] + cmd_dict: T_JSON_DICT = { + "method": "Network.setRequestInterception", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_user_agent_override( + user_agent: str, + accept_language: typing.Optional[str] = None, + platform: typing.Optional[str] = None, + user_agent_metadata: typing.Optional[emulation.UserAgentMetadata] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Allows overriding user agent with the given string. + + :param user_agent: User agent to use. + :param accept_language: *(Optional)* Browser language to emulate. + :param platform: *(Optional)* The platform navigator.platform should return. + :param user_agent_metadata: **(EXPERIMENTAL)** *(Optional)* To be sent in Sec-CH-UA-* headers and returned in navigator.userAgentData + """ + params: T_JSON_DICT = dict() + params["userAgent"] = user_agent + if accept_language is not None: + params["acceptLanguage"] = accept_language + if platform is not None: + params["platform"] = platform + if user_agent_metadata is not None: + params["userAgentMetadata"] = user_agent_metadata.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.setUserAgentOverride", + "params": params, + } + json = yield cmd_dict
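Overriding the user agent, assuming `tab` as above; the UA string, language and platform values are placeholders.

from nodriver import cdp

async def spoof_ua(tab):
    await tab.send(cdp.network.set_user_agent_override(
        user_agent="Mozilla/5.0 (X11; Linux x86_64) ExampleUA/1.0",  # placeholder UA
        accept_language="en-US,en;q=0.9",
        platform="Linux x86_64",
    ))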
+ + + +
+[docs] +def stream_resource_content( + request_id: RequestId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Enables streaming of the response for the given requestId. + If enabled, the dataReceived event contains the data that was received during streaming. + + **EXPERIMENTAL** + + :param request_id: Identifier of the request to stream. + :returns: Data that has been buffered until streaming is enabled. (Encoded as a base64 string when passed over JSON) + """ + params: T_JSON_DICT = dict() + params["requestId"] = request_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.streamResourceContent", + "params": params, + } + json = yield cmd_dict + return str(json["bufferedData"])
+ + + +
+[docs] +def get_security_isolation_status( + frame_id: typing.Optional[page.FrameId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SecurityIsolationStatus]: + """ + Returns information about the COEP/COOP isolation status. + + **EXPERIMENTAL** + + :param frame_id: *(Optional)* If no frameId is provided, the status of the target is provided. + :returns: + """ + params: T_JSON_DICT = dict() + if frame_id is not None: + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.getSecurityIsolationStatus", + "params": params, + } + json = yield cmd_dict + return SecurityIsolationStatus.from_json(json["status"])
+ + + +
+[docs] +def enable_reporting_api( + enable: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables tracking for the Reporting API, events generated by the Reporting API will now be delivered to the client. + Enabling triggers 'reportingApiReportAdded' for all existing reports. + + **EXPERIMENTAL** + + :param enable: Whether to enable or disable events for the Reporting API + """ + params: T_JSON_DICT = dict() + params["enable"] = enable + cmd_dict: T_JSON_DICT = { + "method": "Network.enableReportingApi", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def load_network_resource( + url: str, + options: LoadNetworkResourceOptions, + frame_id: typing.Optional[page.FrameId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, LoadNetworkResourcePageResult]: + """ + Fetches the resource and returns the content. + + **EXPERIMENTAL** + + :param frame_id: *(Optional)* Frame id to get the resource for. Mandatory for frame targets, and should be omitted for worker targets. + :param url: URL of the resource to get content for. + :param options: Options for the request. + :returns: + """ + params: T_JSON_DICT = dict() + if frame_id is not None: + params["frameId"] = frame_id.to_json() + params["url"] = url + params["options"] = options.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Network.loadNetworkResource", + "params": params, + } + json = yield cmd_dict + return LoadNetworkResourcePageResult.from_json(json["resource"])
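A sketch of fetching a resource through the browser's network stack with load_network_resource(), assuming `tab` as above and a frame (page) target, where a frame id is mandatory; the main frame id is looked up via Page.getFrameTree, and the URL is whatever you pass in.

from nodriver import cdp

async def fetch_resource(tab, url: str):
    # For frame targets a frame id is required; use the main frame from the frame tree.
    frame_tree = await tab.send(cdp.page.get_frame_tree())
    result = await tab.send(cdp.network.load_network_resource(
        url=url,
        options=cdp.network.LoadNetworkResourceOptions(
            disable_cache=False,
            include_credentials=False,
        ),
        frame_id=frame_tree.frame.id_,
    ))
    return result  # LoadNetworkResourcePageResult: success, httpStatusCode, stream, ...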
+ + + +
+[docs] +@event_class("Network.dataReceived") +@dataclass +class DataReceived: + """ + Fired when data chunk was received over the network. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: Data chunk length. + data_length: int + #: Actual bytes received (might be less than dataLength for compressed encodings). + encoded_data_length: int + #: Data that was received. (Encoded as a base64 string when passed over JSON) + data: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DataReceived: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + data_length=int(json["dataLength"]), + encoded_data_length=int(json["encodedDataLength"]), + data=str(json["data"]) if json.get("data", None) is not None else None, + )
+ + + +
+[docs] +@event_class("Network.eventSourceMessageReceived") +@dataclass +class EventSourceMessageReceived: + """ + Fired when EventSource message is received. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: Message type. + event_name: str + #: Message identifier. + event_id: str + #: Message content. + data: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> EventSourceMessageReceived: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + event_name=str(json["eventName"]), + event_id=str(json["eventId"]), + data=str(json["data"]), + )
+ + + +
+[docs] +@event_class("Network.loadingFailed") +@dataclass +class LoadingFailed: + """ + Fired when HTTP request has failed to load. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: Resource type. + type_: ResourceType + #: Error message. List of network errors: https://cs.chromium.org/chromium/src/net/base/net_error_list.h + error_text: str + #: True if loading was canceled. + canceled: typing.Optional[bool] + #: The reason why loading was blocked, if any. + blocked_reason: typing.Optional[BlockedReason] + #: The reason why loading was blocked by CORS, if any. + cors_error_status: typing.Optional[CorsErrorStatus] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LoadingFailed: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + type_=ResourceType.from_json(json["type"]), + error_text=str(json["errorText"]), + canceled=( + bool(json["canceled"]) + if json.get("canceled", None) is not None + else None + ), + blocked_reason=( + BlockedReason.from_json(json["blockedReason"]) + if json.get("blockedReason", None) is not None + else None + ), + cors_error_status=( + CorsErrorStatus.from_json(json["corsErrorStatus"]) + if json.get("corsErrorStatus", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Network.loadingFinished") +@dataclass +class LoadingFinished: + """ + Fired when HTTP request has finished loading. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: Total number of bytes received for this request. + encoded_data_length: float + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LoadingFinished: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + encoded_data_length=float(json["encodedDataLength"]), + )
+ + + +
+[docs] +@deprecated(version="1.3") +@event_class("Network.requestIntercepted") +@dataclass +class RequestIntercepted: + """ + **EXPERIMENTAL** + + Details of an intercepted HTTP request, which must be either allowed, blocked, modified or + mocked. + Deprecated, use Fetch.requestPaused instead. + """ + + #: Each request the page makes will have a unique id, however if any redirects are encountered + #: while processing that fetch, they will be reported with the same id as the original fetch. + #: Likewise if HTTP authentication is needed then the same fetch id will be used. + interception_id: InterceptionId + request: Request + #: The id of the frame that initiated the request. + frame_id: page.FrameId + #: How the requested resource will be used. + resource_type: ResourceType + #: Whether this is a navigation request, which can abort the navigation completely. + is_navigation_request: bool + #: Set if the request is a navigation that will result in a download. + #: Only present after response is received from the server (i.e. HeadersReceived stage). + is_download: typing.Optional[bool] + #: Redirect location, only sent if a redirect was intercepted. + redirect_url: typing.Optional[str] + #: Details of the Authorization Challenge encountered. If this is set then + #: continueInterceptedRequest must contain an authChallengeResponse. + auth_challenge: typing.Optional[AuthChallenge] + #: Response error if intercepted at response stage or if redirect occurred while intercepting + #: request. + response_error_reason: typing.Optional[ErrorReason] + #: Response code if intercepted at response stage or if redirect occurred while intercepting + #: request or auth retry occurred. + response_status_code: typing.Optional[int] + #: Response headers if intercepted at the response stage or if redirect occurred while + #: intercepting request or auth retry occurred. + response_headers: typing.Optional[Headers] + #: If the intercepted request had a corresponding requestWillBeSent event fired for it, then + #: this requestId will be the same as the requestId present in the requestWillBeSent event. + request_id: typing.Optional[RequestId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RequestIntercepted: + return cls( + interception_id=InterceptionId.from_json(json["interceptionId"]), + request=Request.from_json(json["request"]), + frame_id=page.FrameId.from_json(json["frameId"]), + resource_type=ResourceType.from_json(json["resourceType"]), + is_navigation_request=bool(json["isNavigationRequest"]), + is_download=( + bool(json["isDownload"]) + if json.get("isDownload", None) is not None + else None + ), + redirect_url=( + str(json["redirectUrl"]) + if json.get("redirectUrl", None) is not None + else None + ), + auth_challenge=( + AuthChallenge.from_json(json["authChallenge"]) + if json.get("authChallenge", None) is not None + else None + ), + response_error_reason=( + ErrorReason.from_json(json["responseErrorReason"]) + if json.get("responseErrorReason", None) is not None + else None + ), + response_status_code=( + int(json["responseStatusCode"]) + if json.get("responseStatusCode", None) is not None + else None + ), + response_headers=( + Headers.from_json(json["responseHeaders"]) + if json.get("responseHeaders", None) is not None + else None + ), + request_id=( + RequestId.from_json(json["requestId"]) + if json.get("requestId", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Network.requestServedFromCache") +@dataclass +class RequestServedFromCache: + """ + Fired if request ended up loading from cache. + """ + + #: Request identifier. + request_id: RequestId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RequestServedFromCache: + return cls(request_id=RequestId.from_json(json["requestId"]))
+ + + +
+[docs] +@event_class("Network.requestWillBeSent") +@dataclass +class RequestWillBeSent: + """ + Fired when page is about to send HTTP request. + """ + + #: Request identifier. + request_id: RequestId + #: Loader identifier. Empty string if the request is fetched from worker. + loader_id: LoaderId + #: URL of the document this request is loaded for. + document_url: str + #: Request data. + request: Request + #: Timestamp. + timestamp: MonotonicTime + #: Timestamp. + wall_time: TimeSinceEpoch + #: Request initiator. + initiator: Initiator + #: In the case that redirectResponse is populated, this flag indicates whether + #: requestWillBeSentExtraInfo and responseReceivedExtraInfo events will be or were emitted + #: for the request which was just redirected. + redirect_has_extra_info: bool + #: Redirect response data. + redirect_response: typing.Optional[Response] + #: Type of this resource. + type_: typing.Optional[ResourceType] + #: Frame identifier. + frame_id: typing.Optional[page.FrameId] + #: Whether the request is initiated by a user gesture. Defaults to false. + has_user_gesture: typing.Optional[bool] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RequestWillBeSent: + return cls( + request_id=RequestId.from_json(json["requestId"]), + loader_id=LoaderId.from_json(json["loaderId"]), + document_url=str(json["documentURL"]), + request=Request.from_json(json["request"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + wall_time=TimeSinceEpoch.from_json(json["wallTime"]), + initiator=Initiator.from_json(json["initiator"]), + redirect_has_extra_info=bool(json["redirectHasExtraInfo"]), + redirect_response=( + Response.from_json(json["redirectResponse"]) + if json.get("redirectResponse", None) is not None + else None + ), + type_=( + ResourceType.from_json(json["type"]) + if json.get("type", None) is not None + else None + ), + frame_id=( + page.FrameId.from_json(json["frameId"]) + if json.get("frameId", None) is not None + else None + ), + has_user_gesture=( + bool(json["hasUserGesture"]) + if json.get("hasUserGesture", None) is not None + else None + ), + )
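Event dataclasses like the one above are consumed by registering a handler and enabling the domain; this sketch assumes nodriver exposes an add_handler() method on the tab/connection object (an assumption about the surrounding API, not something defined in this module).

from nodriver import cdp

def log_request(event: cdp.network.RequestWillBeSent):
    # Called for every outgoing request once the Network domain is enabled.
    print(event.request.method, event.request.url)

async def watch_requests(tab):
    tab.add_handler(cdp.network.RequestWillBeSent, log_request)  # assumed registration API
    await tab.send(cdp.network.enable())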
+ + + +
+[docs] +@event_class("Network.resourceChangedPriority") +@dataclass +class ResourceChangedPriority: + """ + **EXPERIMENTAL** + + Fired when resource loading priority is changed + """ + + #: Request identifier. + request_id: RequestId + #: New priority + new_priority: ResourcePriority + #: Timestamp. + timestamp: MonotonicTime + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ResourceChangedPriority: + return cls( + request_id=RequestId.from_json(json["requestId"]), + new_priority=ResourcePriority.from_json(json["newPriority"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + )
+ + + +
+[docs] +@event_class("Network.signedExchangeReceived") +@dataclass +class SignedExchangeReceived: + """ + **EXPERIMENTAL** + + Fired when a signed exchange was received over the network + """ + + #: Request identifier. + request_id: RequestId + #: Information about the signed exchange response. + info: SignedExchangeInfo + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SignedExchangeReceived: + return cls( + request_id=RequestId.from_json(json["requestId"]), + info=SignedExchangeInfo.from_json(json["info"]), + )
+ + + +
+[docs] +@event_class("Network.responseReceived") +@dataclass +class ResponseReceived: + """ + Fired when HTTP response is available. + """ + + #: Request identifier. + request_id: RequestId + #: Loader identifier. Empty string if the request is fetched from worker. + loader_id: LoaderId + #: Timestamp. + timestamp: MonotonicTime + #: Resource type. + type_: ResourceType + #: Response data. + response: Response + #: Indicates whether requestWillBeSentExtraInfo and responseReceivedExtraInfo events will be + #: or were emitted for this request. + has_extra_info: bool + #: Frame identifier. + frame_id: typing.Optional[page.FrameId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ResponseReceived: + return cls( + request_id=RequestId.from_json(json["requestId"]), + loader_id=LoaderId.from_json(json["loaderId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + type_=ResourceType.from_json(json["type"]), + response=Response.from_json(json["response"]), + has_extra_info=bool(json["hasExtraInfo"]), + frame_id=( + page.FrameId.from_json(json["frameId"]) + if json.get("frameId", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Network.webSocketClosed") +@dataclass +class WebSocketClosed: + """ + Fired when WebSocket is closed. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketClosed: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + )
+ + + +
+[docs] +@event_class("Network.webSocketCreated") +@dataclass +class WebSocketCreated: + """ + Fired upon WebSocket creation. + """ + + #: Request identifier. + request_id: RequestId + #: WebSocket request URL. + url: str + #: Request initiator. + initiator: typing.Optional[Initiator] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketCreated: + return cls( + request_id=RequestId.from_json(json["requestId"]), + url=str(json["url"]), + initiator=( + Initiator.from_json(json["initiator"]) + if json.get("initiator", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Network.webSocketFrameError") +@dataclass +class WebSocketFrameError: + """ + Fired when WebSocket message error occurs. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: WebSocket error message. + error_message: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketFrameError: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + error_message=str(json["errorMessage"]), + )
+ + + +
+[docs] +@event_class("Network.webSocketFrameReceived") +@dataclass +class WebSocketFrameReceived: + """ + Fired when WebSocket message is received. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: WebSocket response data. + response: WebSocketFrame + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketFrameReceived: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + response=WebSocketFrame.from_json(json["response"]), + )
+ + + +
+[docs] +@event_class("Network.webSocketFrameSent") +@dataclass +class WebSocketFrameSent: + """ + Fired when WebSocket message is sent. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: WebSocket response data. + response: WebSocketFrame + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketFrameSent: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + response=WebSocketFrame.from_json(json["response"]), + )
+ + + +
+[docs] +@event_class("Network.webSocketHandshakeResponseReceived") +@dataclass +class WebSocketHandshakeResponseReceived: + """ + Fired when WebSocket handshake response becomes available. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: WebSocket response data. + response: WebSocketResponse + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketHandshakeResponseReceived: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + response=WebSocketResponse.from_json(json["response"]), + )
+ + + +
+[docs] +@event_class("Network.webSocketWillSendHandshakeRequest") +@dataclass +class WebSocketWillSendHandshakeRequest: + """ + Fired when WebSocket is about to initiate handshake. + """ + + #: Request identifier. + request_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + #: UTC Timestamp. + wall_time: TimeSinceEpoch + #: WebSocket request data. + request: WebSocketRequest + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebSocketWillSendHandshakeRequest: + return cls( + request_id=RequestId.from_json(json["requestId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + wall_time=TimeSinceEpoch.from_json(json["wallTime"]), + request=WebSocketRequest.from_json(json["request"]), + )
+ + + +
+[docs] +@event_class("Network.webTransportCreated") +@dataclass +class WebTransportCreated: + """ + Fired upon WebTransport creation. + """ + + #: WebTransport identifier. + transport_id: RequestId + #: WebTransport request URL. + url: str + #: Timestamp. + timestamp: MonotonicTime + #: Request initiator. + initiator: typing.Optional[Initiator] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebTransportCreated: + return cls( + transport_id=RequestId.from_json(json["transportId"]), + url=str(json["url"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + initiator=( + Initiator.from_json(json["initiator"]) + if json.get("initiator", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Network.webTransportConnectionEstablished") +@dataclass +class WebTransportConnectionEstablished: + """ + Fired when WebTransport handshake is finished. + """ + + #: WebTransport identifier. + transport_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebTransportConnectionEstablished: + return cls( + transport_id=RequestId.from_json(json["transportId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + )
+ + + +
+[docs] +@event_class("Network.webTransportClosed") +@dataclass +class WebTransportClosed: + """ + Fired when WebTransport is disposed. + """ + + #: WebTransport identifier. + transport_id: RequestId + #: Timestamp. + timestamp: MonotonicTime + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebTransportClosed: + return cls( + transport_id=RequestId.from_json(json["transportId"]), + timestamp=MonotonicTime.from_json(json["timestamp"]), + )
+ + + +
+[docs] +@event_class("Network.requestWillBeSentExtraInfo") +@dataclass +class RequestWillBeSentExtraInfo: + """ + **EXPERIMENTAL** + + Fired when additional information about a requestWillBeSent event is available from the + network stack. Not every requestWillBeSent event will have an additional + requestWillBeSentExtraInfo fired for it, and there is no guarantee whether requestWillBeSent + or requestWillBeSentExtraInfo will be fired first for the same request. + """ + + #: Request identifier. Used to match this information to an existing requestWillBeSent event. + request_id: RequestId + #: A list of cookies potentially associated to the requested URL. This includes both cookies sent with + #: the request and the ones not sent; the latter are distinguished by having blockedReasons field set. + associated_cookies: typing.List[AssociatedCookie] + #: Raw request headers as they will be sent over the wire. + headers: Headers + #: Connection timing information for the request. + connect_timing: ConnectTiming + #: The client security state set for the request. + client_security_state: typing.Optional[ClientSecurityState] + #: Whether the site has partitioned cookies stored in a partition different than the current one. + site_has_cookie_in_other_partition: typing.Optional[bool] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RequestWillBeSentExtraInfo: + return cls( + request_id=RequestId.from_json(json["requestId"]), + associated_cookies=[ + AssociatedCookie.from_json(i) for i in json["associatedCookies"] + ], + headers=Headers.from_json(json["headers"]), + connect_timing=ConnectTiming.from_json(json["connectTiming"]), + client_security_state=( + ClientSecurityState.from_json(json["clientSecurityState"]) + if json.get("clientSecurityState", None) is not None + else None + ), + site_has_cookie_in_other_partition=( + bool(json["siteHasCookieInOtherPartition"]) + if json.get("siteHasCookieInOtherPartition", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Network.responseReceivedExtraInfo") +@dataclass +class ResponseReceivedExtraInfo: + """ + **EXPERIMENTAL** + + Fired when additional information about a responseReceived event is available from the network + stack. Not every responseReceived event will have an additional responseReceivedExtraInfo for + it, and responseReceivedExtraInfo may be fired before or after responseReceived. + """ + + #: Request identifier. Used to match this information to another responseReceived event. + request_id: RequestId + #: A list of cookies which were not stored from the response along with the corresponding + #: reasons for blocking. The cookies here may not be valid due to syntax errors, which + #: are represented by the invalid cookie line string instead of a proper cookie. + blocked_cookies: typing.List[BlockedSetCookieWithReason] + #: Raw response headers as they were received over the wire. + headers: Headers + #: The IP address space of the resource. The address space can only be determined once the transport + #: established the connection, so we can't send it in ``requestWillBeSentExtraInfo``. + resource_ip_address_space: IPAddressSpace + #: The status code of the response. This is useful in cases the request failed and no responseReceived + #: event is triggered, which is the case for, e.g., CORS errors. This is also the correct status code + #: for cached requests, where the status in responseReceived is a 200 and this will be 304. + status_code: int + #: Raw response header text as it was received over the wire. The raw text may not always be + #: available, such as in the case of HTTP/2 or QUIC. + headers_text: typing.Optional[str] + #: The cookie partition key that will be used to store partitioned cookies set in this response. + #: Only sent when partitioned cookies are enabled. + cookie_partition_key: typing.Optional[CookiePartitionKey] + #: True if partitioned cookies are enabled, but the partition key is not serializable to string. + cookie_partition_key_opaque: typing.Optional[bool] + #: A list of cookies which should have been blocked by 3PCD but are exempted and stored from + #: the response with the corresponding reason. + exempted_cookies: typing.Optional[typing.List[ExemptedSetCookieWithReason]] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ResponseReceivedExtraInfo: + return cls( + request_id=RequestId.from_json(json["requestId"]), + blocked_cookies=[ + BlockedSetCookieWithReason.from_json(i) for i in json["blockedCookies"] + ], + headers=Headers.from_json(json["headers"]), + resource_ip_address_space=IPAddressSpace.from_json( + json["resourceIPAddressSpace"] + ), + status_code=int(json["statusCode"]), + headers_text=( + str(json["headersText"]) + if json.get("headersText", None) is not None + else None + ), + cookie_partition_key=( + CookiePartitionKey.from_json(json["cookiePartitionKey"]) + if json.get("cookiePartitionKey", None) is not None + else None + ), + cookie_partition_key_opaque=( + bool(json["cookiePartitionKeyOpaque"]) + if json.get("cookiePartitionKeyOpaque", None) is not None + else None + ), + exempted_cookies=( + [ + ExemptedSetCookieWithReason.from_json(i) + for i in json["exemptedCookies"] + ] + if json.get("exemptedCookies", None) is not None + else None + ), + )
+ + + +
+[docs]
+@event_class("Network.responseReceivedEarlyHints")
+@dataclass
+class ResponseReceivedEarlyHints:
+    """
+    **EXPERIMENTAL**
+
+    Fired when 103 Early Hints headers are received in addition to the common response.
+    Not every responseReceived event will have a responseReceivedEarlyHints fired.
+    Only one responseReceivedEarlyHints may be fired for each responseReceived event.
+    """
+
+    #: Request identifier. Used to match this information to another responseReceived event.
+    request_id: RequestId
+    #: Raw response headers as they were received over the wire.
+    headers: Headers
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> ResponseReceivedEarlyHints:
+        return cls(
+            request_id=RequestId.from_json(json["requestId"]),
+            headers=Headers.from_json(json["headers"]),
+        )
+ + + +
+[docs]
+@event_class("Network.trustTokenOperationDone")
+@dataclass
+class TrustTokenOperationDone:
+    """
+    **EXPERIMENTAL**
+
+    Fired exactly once for each Trust Token operation. Depending on
+    the type of the operation and whether the operation succeeded or
+    failed, the event is fired before the corresponding request was sent
+    or after the response was received.
+    """
+
+    #: Detailed success or error status of the operation.
+    #: 'AlreadyExists' also signifies a successful operation, as the result
+    #: of the operation already exists and thus, the operation was aborted
+    #: preemptively (e.g. a cache hit).
+    status: str
+    type_: TrustTokenOperationType
+    request_id: RequestId
+    #: Top level origin. The context in which the operation was attempted.
+    top_level_origin: typing.Optional[str]
+    #: Origin of the issuer in case of an "Issuance" or "Redemption" operation.
+    issuer_origin: typing.Optional[str]
+    #: The number of obtained Trust Tokens on a successful "Issuance" operation.
+    issued_token_count: typing.Optional[int]
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> TrustTokenOperationDone:
+        return cls(
+            status=str(json["status"]),
+            type_=TrustTokenOperationType.from_json(json["type"]),
+            request_id=RequestId.from_json(json["requestId"]),
+            top_level_origin=(
+                str(json["topLevelOrigin"])
+                if json.get("topLevelOrigin", None) is not None
+                else None
+            ),
+            issuer_origin=(
+                str(json["issuerOrigin"])
+                if json.get("issuerOrigin", None) is not None
+                else None
+            ),
+            issued_token_count=(
+                int(json["issuedTokenCount"])
+                if json.get("issuedTokenCount", None) is not None
+                else None
+            ),
+        )
+ + + +
+[docs] +@event_class("Network.policyUpdated") +@dataclass +class PolicyUpdated: + """ + **EXPERIMENTAL** + + Fired once security policy has been updated. + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PolicyUpdated: + return cls()
+ + + +
+[docs] +@event_class("Network.subresourceWebBundleMetadataReceived") +@dataclass +class SubresourceWebBundleMetadataReceived: + """ + **EXPERIMENTAL** + + Fired once when parsing the .wbn file has succeeded. + The event contains the information about the web bundle contents. + """ + + #: Request identifier. Used to match this information to another event. + request_id: RequestId + #: A list of URLs of resources in the subresource Web Bundle. + urls: typing.List[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SubresourceWebBundleMetadataReceived: + return cls( + request_id=RequestId.from_json(json["requestId"]), + urls=[str(i) for i in json["urls"]], + )
+ + + +
+[docs] +@event_class("Network.subresourceWebBundleMetadataError") +@dataclass +class SubresourceWebBundleMetadataError: + """ + **EXPERIMENTAL** + + Fired once when parsing the .wbn file has failed. + """ + + #: Request identifier. Used to match this information to another event. + request_id: RequestId + #: Error message + error_message: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SubresourceWebBundleMetadataError: + return cls( + request_id=RequestId.from_json(json["requestId"]), + error_message=str(json["errorMessage"]), + )
+ + + +
+[docs]
+@event_class("Network.subresourceWebBundleInnerResponseParsed")
+@dataclass
+class SubresourceWebBundleInnerResponseParsed:
+    """
+    **EXPERIMENTAL**
+
+    Fired when handling requests for resources within a .wbn file.
+    Note: this will only be fired for resources that are requested by the webpage.
+    """
+
+    #: Request identifier of the subresource request
+    inner_request_id: RequestId
+    #: URL of the subresource resource.
+    inner_request_url: str
+    #: Bundle request identifier. Used to match this information to another event.
+    #: This may be absent in case the instrumentation was enabled only
+    #: after the webbundle was parsed.
+    bundle_request_id: typing.Optional[RequestId]
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> SubresourceWebBundleInnerResponseParsed:
+        return cls(
+            inner_request_id=RequestId.from_json(json["innerRequestId"]),
+            inner_request_url=str(json["innerRequestURL"]),
+            bundle_request_id=(
+                RequestId.from_json(json["bundleRequestId"])
+                if json.get("bundleRequestId", None) is not None
+                else None
+            ),
+        )
+ + + +
+[docs]
+@event_class("Network.subresourceWebBundleInnerResponseError")
+@dataclass
+class SubresourceWebBundleInnerResponseError:
+    """
+    **EXPERIMENTAL**
+
+    Fired when a request for a resource within a .wbn file failed.
+    """
+
+    #: Request identifier of the subresource request
+    inner_request_id: RequestId
+    #: URL of the subresource resource.
+    inner_request_url: str
+    #: Error message
+    error_message: str
+    #: Bundle request identifier. Used to match this information to another event.
+    #: This may be absent in case the instrumentation was enabled only
+    #: after the webbundle was parsed.
+    bundle_request_id: typing.Optional[RequestId]
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> SubresourceWebBundleInnerResponseError:
+        return cls(
+            inner_request_id=RequestId.from_json(json["innerRequestId"]),
+            inner_request_url=str(json["innerRequestURL"]),
+            error_message=str(json["errorMessage"]),
+            bundle_request_id=(
+                RequestId.from_json(json["bundleRequestId"])
+                if json.get("bundleRequestId", None) is not None
+                else None
+            ),
+        )
+ + + +
+[docs] +@event_class("Network.reportingApiReportAdded") +@dataclass +class ReportingApiReportAdded: + """ + **EXPERIMENTAL** + + Is sent whenever a new report is added. + And after 'enableReportingApi' for all existing reports. + """ + + report: ReportingApiReport + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ReportingApiReportAdded: + return cls(report=ReportingApiReport.from_json(json["report"]))
+ + + +
+[docs] +@event_class("Network.reportingApiReportUpdated") +@dataclass +class ReportingApiReportUpdated: + """ + **EXPERIMENTAL** + + + """ + + report: ReportingApiReport + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ReportingApiReportUpdated: + return cls(report=ReportingApiReport.from_json(json["report"]))
+ + + +
+[docs] +@event_class("Network.reportingApiEndpointsChangedForOrigin") +@dataclass +class ReportingApiEndpointsChangedForOrigin: + """ + **EXPERIMENTAL** + + + """ + + #: Origin of the document(s) which configured the endpoints. + origin: str + endpoints: typing.List[ReportingApiEndpoint] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ReportingApiEndpointsChangedForOrigin: + return cls( + origin=str(json["origin"]), + endpoints=[ReportingApiEndpoint.from_json(i) for i in json["endpoints"]], + )
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/overlay.html b/docs/_build/html/_modules/nodriver/cdp/overlay.html
new file mode 100644
index 0000000..9f675ca
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/overlay.html
@@ -0,0 +1,2152 @@
+ nodriver.cdp.overlay - nodriver documentation

Source code for nodriver.cdp.overlay

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Overlay (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import page
+from . import runtime
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +@dataclass +class SourceOrderConfig: + """ + Configuration data for drawing the source order of an elements children. + """ + + #: the color to outline the given element in. + parent_outline_color: dom.RGBA + + #: the color to outline the child elements in. + child_outline_color: dom.RGBA + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["parentOutlineColor"] = self.parent_outline_color.to_json() + json["childOutlineColor"] = self.child_outline_color.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SourceOrderConfig: + return cls( + parent_outline_color=dom.RGBA.from_json(json["parentOutlineColor"]), + child_outline_color=dom.RGBA.from_json(json["childOutlineColor"]), + )
+ + + +
+[docs] +@dataclass +class GridHighlightConfig: + """ + Configuration data for the highlighting of Grid elements. + """ + + #: Whether the extension lines from grid cells to the rulers should be shown (default: false). + show_grid_extension_lines: typing.Optional[bool] = None + + #: Show Positive line number labels (default: false). + show_positive_line_numbers: typing.Optional[bool] = None + + #: Show Negative line number labels (default: false). + show_negative_line_numbers: typing.Optional[bool] = None + + #: Show area name labels (default: false). + show_area_names: typing.Optional[bool] = None + + #: Show line name labels (default: false). + show_line_names: typing.Optional[bool] = None + + #: Show track size labels (default: false). + show_track_sizes: typing.Optional[bool] = None + + #: The grid container border highlight color (default: transparent). + grid_border_color: typing.Optional[dom.RGBA] = None + + #: The cell border color (default: transparent). Deprecated, please use rowLineColor and columnLineColor instead. + cell_border_color: typing.Optional[dom.RGBA] = None + + #: The row line color (default: transparent). + row_line_color: typing.Optional[dom.RGBA] = None + + #: The column line color (default: transparent). + column_line_color: typing.Optional[dom.RGBA] = None + + #: Whether the grid border is dashed (default: false). + grid_border_dash: typing.Optional[bool] = None + + #: Whether the cell border is dashed (default: false). Deprecated, please us rowLineDash and columnLineDash instead. + cell_border_dash: typing.Optional[bool] = None + + #: Whether row lines are dashed (default: false). + row_line_dash: typing.Optional[bool] = None + + #: Whether column lines are dashed (default: false). + column_line_dash: typing.Optional[bool] = None + + #: The row gap highlight fill color (default: transparent). + row_gap_color: typing.Optional[dom.RGBA] = None + + #: The row gap hatching fill color (default: transparent). + row_hatch_color: typing.Optional[dom.RGBA] = None + + #: The column gap highlight fill color (default: transparent). + column_gap_color: typing.Optional[dom.RGBA] = None + + #: The column gap hatching fill color (default: transparent). + column_hatch_color: typing.Optional[dom.RGBA] = None + + #: The named grid areas border color (Default: transparent). + area_border_color: typing.Optional[dom.RGBA] = None + + #: The grid container background color (Default: transparent). 
+ grid_background_color: typing.Optional[dom.RGBA] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.show_grid_extension_lines is not None: + json["showGridExtensionLines"] = self.show_grid_extension_lines + if self.show_positive_line_numbers is not None: + json["showPositiveLineNumbers"] = self.show_positive_line_numbers + if self.show_negative_line_numbers is not None: + json["showNegativeLineNumbers"] = self.show_negative_line_numbers + if self.show_area_names is not None: + json["showAreaNames"] = self.show_area_names + if self.show_line_names is not None: + json["showLineNames"] = self.show_line_names + if self.show_track_sizes is not None: + json["showTrackSizes"] = self.show_track_sizes + if self.grid_border_color is not None: + json["gridBorderColor"] = self.grid_border_color.to_json() + if self.cell_border_color is not None: + json["cellBorderColor"] = self.cell_border_color.to_json() + if self.row_line_color is not None: + json["rowLineColor"] = self.row_line_color.to_json() + if self.column_line_color is not None: + json["columnLineColor"] = self.column_line_color.to_json() + if self.grid_border_dash is not None: + json["gridBorderDash"] = self.grid_border_dash + if self.cell_border_dash is not None: + json["cellBorderDash"] = self.cell_border_dash + if self.row_line_dash is not None: + json["rowLineDash"] = self.row_line_dash + if self.column_line_dash is not None: + json["columnLineDash"] = self.column_line_dash + if self.row_gap_color is not None: + json["rowGapColor"] = self.row_gap_color.to_json() + if self.row_hatch_color is not None: + json["rowHatchColor"] = self.row_hatch_color.to_json() + if self.column_gap_color is not None: + json["columnGapColor"] = self.column_gap_color.to_json() + if self.column_hatch_color is not None: + json["columnHatchColor"] = self.column_hatch_color.to_json() + if self.area_border_color is not None: + json["areaBorderColor"] = self.area_border_color.to_json() + if self.grid_background_color is not None: + json["gridBackgroundColor"] = self.grid_background_color.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> GridHighlightConfig: + return cls( + show_grid_extension_lines=( + bool(json["showGridExtensionLines"]) + if json.get("showGridExtensionLines", None) is not None + else None + ), + show_positive_line_numbers=( + bool(json["showPositiveLineNumbers"]) + if json.get("showPositiveLineNumbers", None) is not None + else None + ), + show_negative_line_numbers=( + bool(json["showNegativeLineNumbers"]) + if json.get("showNegativeLineNumbers", None) is not None + else None + ), + show_area_names=( + bool(json["showAreaNames"]) + if json.get("showAreaNames", None) is not None + else None + ), + show_line_names=( + bool(json["showLineNames"]) + if json.get("showLineNames", None) is not None + else None + ), + show_track_sizes=( + bool(json["showTrackSizes"]) + if json.get("showTrackSizes", None) is not None + else None + ), + grid_border_color=( + dom.RGBA.from_json(json["gridBorderColor"]) + if json.get("gridBorderColor", None) is not None + else None + ), + cell_border_color=( + dom.RGBA.from_json(json["cellBorderColor"]) + if json.get("cellBorderColor", None) is not None + else None + ), + row_line_color=( + dom.RGBA.from_json(json["rowLineColor"]) + if json.get("rowLineColor", None) is not None + else None + ), + column_line_color=( + dom.RGBA.from_json(json["columnLineColor"]) + if json.get("columnLineColor", None) is not None + else None + ), + grid_border_dash=( + 
bool(json["gridBorderDash"]) + if json.get("gridBorderDash", None) is not None + else None + ), + cell_border_dash=( + bool(json["cellBorderDash"]) + if json.get("cellBorderDash", None) is not None + else None + ), + row_line_dash=( + bool(json["rowLineDash"]) + if json.get("rowLineDash", None) is not None + else None + ), + column_line_dash=( + bool(json["columnLineDash"]) + if json.get("columnLineDash", None) is not None + else None + ), + row_gap_color=( + dom.RGBA.from_json(json["rowGapColor"]) + if json.get("rowGapColor", None) is not None + else None + ), + row_hatch_color=( + dom.RGBA.from_json(json["rowHatchColor"]) + if json.get("rowHatchColor", None) is not None + else None + ), + column_gap_color=( + dom.RGBA.from_json(json["columnGapColor"]) + if json.get("columnGapColor", None) is not None + else None + ), + column_hatch_color=( + dom.RGBA.from_json(json["columnHatchColor"]) + if json.get("columnHatchColor", None) is not None + else None + ), + area_border_color=( + dom.RGBA.from_json(json["areaBorderColor"]) + if json.get("areaBorderColor", None) is not None + else None + ), + grid_background_color=( + dom.RGBA.from_json(json["gridBackgroundColor"]) + if json.get("gridBackgroundColor", None) is not None + else None + ), + )
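These overlay dataclasses are plain configuration holders that serialize to the CDP wire format; a small sketch constructing a GridHighlightConfig (the colors are arbitrary) and round-tripping it through to_json()/from_json().

from nodriver import cdp

cfg = cdp.overlay.GridHighlightConfig(
    show_grid_extension_lines=True,
    row_line_color=cdp.dom.RGBA(r=255, g=0, b=0, a=0.8),
    column_line_color=cdp.dom.RGBA(r=0, g=128, b=255, a=0.8),
)
wire = cfg.to_json()  # e.g. {"showGridExtensionLines": True, "rowLineColor": {...}, ...}
assert cdp.overlay.GridHighlightConfig.from_json(wire).row_line_color.r == 255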
+ + + +
+[docs] +@dataclass +class FlexContainerHighlightConfig: + """ + Configuration data for the highlighting of Flex container elements. + """ + + #: The style of the container border + container_border: typing.Optional[LineStyle] = None + + #: The style of the separator between lines + line_separator: typing.Optional[LineStyle] = None + + #: The style of the separator between items + item_separator: typing.Optional[LineStyle] = None + + #: Style of content-distribution space on the main axis (justify-content). + main_distributed_space: typing.Optional[BoxStyle] = None + + #: Style of content-distribution space on the cross axis (align-content). + cross_distributed_space: typing.Optional[BoxStyle] = None + + #: Style of empty space caused by row gaps (gap/row-gap). + row_gap_space: typing.Optional[BoxStyle] = None + + #: Style of empty space caused by columns gaps (gap/column-gap). + column_gap_space: typing.Optional[BoxStyle] = None + + #: Style of the self-alignment line (align-items). + cross_alignment: typing.Optional[LineStyle] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.container_border is not None: + json["containerBorder"] = self.container_border.to_json() + if self.line_separator is not None: + json["lineSeparator"] = self.line_separator.to_json() + if self.item_separator is not None: + json["itemSeparator"] = self.item_separator.to_json() + if self.main_distributed_space is not None: + json["mainDistributedSpace"] = self.main_distributed_space.to_json() + if self.cross_distributed_space is not None: + json["crossDistributedSpace"] = self.cross_distributed_space.to_json() + if self.row_gap_space is not None: + json["rowGapSpace"] = self.row_gap_space.to_json() + if self.column_gap_space is not None: + json["columnGapSpace"] = self.column_gap_space.to_json() + if self.cross_alignment is not None: + json["crossAlignment"] = self.cross_alignment.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FlexContainerHighlightConfig: + return cls( + container_border=( + LineStyle.from_json(json["containerBorder"]) + if json.get("containerBorder", None) is not None + else None + ), + line_separator=( + LineStyle.from_json(json["lineSeparator"]) + if json.get("lineSeparator", None) is not None + else None + ), + item_separator=( + LineStyle.from_json(json["itemSeparator"]) + if json.get("itemSeparator", None) is not None + else None + ), + main_distributed_space=( + BoxStyle.from_json(json["mainDistributedSpace"]) + if json.get("mainDistributedSpace", None) is not None + else None + ), + cross_distributed_space=( + BoxStyle.from_json(json["crossDistributedSpace"]) + if json.get("crossDistributedSpace", None) is not None + else None + ), + row_gap_space=( + BoxStyle.from_json(json["rowGapSpace"]) + if json.get("rowGapSpace", None) is not None + else None + ), + column_gap_space=( + BoxStyle.from_json(json["columnGapSpace"]) + if json.get("columnGapSpace", None) is not None + else None + ), + cross_alignment=( + LineStyle.from_json(json["crossAlignment"]) + if json.get("crossAlignment", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class FlexItemHighlightConfig: + """ + Configuration data for the highlighting of Flex item elements. + """ + + #: Style of the box representing the item's base size + base_size_box: typing.Optional[BoxStyle] = None + + #: Style of the border around the box representing the item's base size + base_size_border: typing.Optional[LineStyle] = None + + #: Style of the arrow representing if the item grew or shrank + flexibility_arrow: typing.Optional[LineStyle] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.base_size_box is not None: + json["baseSizeBox"] = self.base_size_box.to_json() + if self.base_size_border is not None: + json["baseSizeBorder"] = self.base_size_border.to_json() + if self.flexibility_arrow is not None: + json["flexibilityArrow"] = self.flexibility_arrow.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FlexItemHighlightConfig: + return cls( + base_size_box=( + BoxStyle.from_json(json["baseSizeBox"]) + if json.get("baseSizeBox", None) is not None + else None + ), + base_size_border=( + LineStyle.from_json(json["baseSizeBorder"]) + if json.get("baseSizeBorder", None) is not None + else None + ), + flexibility_arrow=( + LineStyle.from_json(json["flexibilityArrow"]) + if json.get("flexibilityArrow", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class LineStyle: + """ + Style information for drawing a line. + """ + + #: The color of the line (default: transparent) + color: typing.Optional[dom.RGBA] = None + + #: The line pattern (default: solid) + pattern: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.color is not None: + json["color"] = self.color.to_json() + if self.pattern is not None: + json["pattern"] = self.pattern + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LineStyle: + return cls( + color=( + dom.RGBA.from_json(json["color"]) + if json.get("color", None) is not None + else None + ), + pattern=( + str(json["pattern"]) if json.get("pattern", None) is not None else None + ), + )
+ + + +
+[docs] +@dataclass +class BoxStyle: + """ + Style information for drawing a box. + """ + + #: The background color for the box (default: transparent) + fill_color: typing.Optional[dom.RGBA] = None + + #: The hatching color for the box (default: transparent) + hatch_color: typing.Optional[dom.RGBA] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.fill_color is not None: + json["fillColor"] = self.fill_color.to_json() + if self.hatch_color is not None: + json["hatchColor"] = self.hatch_color.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BoxStyle: + return cls( + fill_color=( + dom.RGBA.from_json(json["fillColor"]) + if json.get("fillColor", None) is not None + else None + ), + hatch_color=( + dom.RGBA.from_json(json["hatchColor"]) + if json.get("hatchColor", None) is not None + else None + ), + )
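LineStyle and BoxStyle are the primitive pieces that the flex, scroll-snap and container-query highlight configurations in this module are assembled from. A minimal sketch of composing them and round-tripping the result through the wire format (the colours and pattern are arbitrary example values):

    from nodriver.cdp import dom, overlay

    # a dashed red border line and a translucent blue fill
    line = overlay.LineStyle(color=dom.RGBA(r=255, g=0, b=0, a=0.8), pattern="dashed")
    box = overlay.BoxStyle(fill_color=dom.RGBA(r=0, g=128, b=255, a=0.2))

    flex_cfg = overlay.FlexContainerHighlightConfig(
        container_border=line,
        main_distributed_space=box,
    )

    payload = flex_cfg.to_json()   # only the fields that were set appear in the dict
    restored = overlay.FlexContainerHighlightConfig.from_json(payload)
    assert restored == flex_cfg    # dataclass equality; omitted fields stay None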
+ + + +
+[docs] +class ContrastAlgorithm(enum.Enum): + AA = "aa" + AAA = "aaa" + APCA = "apca" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ContrastAlgorithm: + return cls(json)
+ + + +
+[docs] +@dataclass +class HighlightConfig: + """ + Configuration data for the highlighting of page elements. + """ + + #: Whether the node info tooltip should be shown (default: false). + show_info: typing.Optional[bool] = None + + #: Whether the node styles in the tooltip (default: false). + show_styles: typing.Optional[bool] = None + + #: Whether the rulers should be shown (default: false). + show_rulers: typing.Optional[bool] = None + + #: Whether the a11y info should be shown (default: true). + show_accessibility_info: typing.Optional[bool] = None + + #: Whether the extension lines from node to the rulers should be shown (default: false). + show_extension_lines: typing.Optional[bool] = None + + #: The content box highlight fill color (default: transparent). + content_color: typing.Optional[dom.RGBA] = None + + #: The padding highlight fill color (default: transparent). + padding_color: typing.Optional[dom.RGBA] = None + + #: The border highlight fill color (default: transparent). + border_color: typing.Optional[dom.RGBA] = None + + #: The margin highlight fill color (default: transparent). + margin_color: typing.Optional[dom.RGBA] = None + + #: The event target element highlight fill color (default: transparent). + event_target_color: typing.Optional[dom.RGBA] = None + + #: The shape outside fill color (default: transparent). + shape_color: typing.Optional[dom.RGBA] = None + + #: The shape margin fill color (default: transparent). + shape_margin_color: typing.Optional[dom.RGBA] = None + + #: The grid layout color (default: transparent). + css_grid_color: typing.Optional[dom.RGBA] = None + + #: The color format used to format color styles (default: hex). + color_format: typing.Optional[ColorFormat] = None + + #: The grid layout highlight configuration (default: all transparent). + grid_highlight_config: typing.Optional[GridHighlightConfig] = None + + #: The flex container highlight configuration (default: all transparent). + flex_container_highlight_config: typing.Optional[FlexContainerHighlightConfig] = ( + None + ) + + #: The flex item highlight configuration (default: all transparent). + flex_item_highlight_config: typing.Optional[FlexItemHighlightConfig] = None + + #: The contrast algorithm to use for the contrast ratio (default: aa). + contrast_algorithm: typing.Optional[ContrastAlgorithm] = None + + #: The container query container highlight configuration (default: all transparent). 
+ container_query_container_highlight_config: typing.Optional[ + ContainerQueryContainerHighlightConfig + ] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.show_info is not None: + json["showInfo"] = self.show_info + if self.show_styles is not None: + json["showStyles"] = self.show_styles + if self.show_rulers is not None: + json["showRulers"] = self.show_rulers + if self.show_accessibility_info is not None: + json["showAccessibilityInfo"] = self.show_accessibility_info + if self.show_extension_lines is not None: + json["showExtensionLines"] = self.show_extension_lines + if self.content_color is not None: + json["contentColor"] = self.content_color.to_json() + if self.padding_color is not None: + json["paddingColor"] = self.padding_color.to_json() + if self.border_color is not None: + json["borderColor"] = self.border_color.to_json() + if self.margin_color is not None: + json["marginColor"] = self.margin_color.to_json() + if self.event_target_color is not None: + json["eventTargetColor"] = self.event_target_color.to_json() + if self.shape_color is not None: + json["shapeColor"] = self.shape_color.to_json() + if self.shape_margin_color is not None: + json["shapeMarginColor"] = self.shape_margin_color.to_json() + if self.css_grid_color is not None: + json["cssGridColor"] = self.css_grid_color.to_json() + if self.color_format is not None: + json["colorFormat"] = self.color_format.to_json() + if self.grid_highlight_config is not None: + json["gridHighlightConfig"] = self.grid_highlight_config.to_json() + if self.flex_container_highlight_config is not None: + json["flexContainerHighlightConfig"] = ( + self.flex_container_highlight_config.to_json() + ) + if self.flex_item_highlight_config is not None: + json["flexItemHighlightConfig"] = self.flex_item_highlight_config.to_json() + if self.contrast_algorithm is not None: + json["contrastAlgorithm"] = self.contrast_algorithm.to_json() + if self.container_query_container_highlight_config is not None: + json["containerQueryContainerHighlightConfig"] = ( + self.container_query_container_highlight_config.to_json() + ) + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> HighlightConfig: + return cls( + show_info=( + bool(json["showInfo"]) + if json.get("showInfo", None) is not None + else None + ), + show_styles=( + bool(json["showStyles"]) + if json.get("showStyles", None) is not None + else None + ), + show_rulers=( + bool(json["showRulers"]) + if json.get("showRulers", None) is not None + else None + ), + show_accessibility_info=( + bool(json["showAccessibilityInfo"]) + if json.get("showAccessibilityInfo", None) is not None + else None + ), + show_extension_lines=( + bool(json["showExtensionLines"]) + if json.get("showExtensionLines", None) is not None + else None + ), + content_color=( + dom.RGBA.from_json(json["contentColor"]) + if json.get("contentColor", None) is not None + else None + ), + padding_color=( + dom.RGBA.from_json(json["paddingColor"]) + if json.get("paddingColor", None) is not None + else None + ), + border_color=( + dom.RGBA.from_json(json["borderColor"]) + if json.get("borderColor", None) is not None + else None + ), + margin_color=( + dom.RGBA.from_json(json["marginColor"]) + if json.get("marginColor", None) is not None + else None + ), + event_target_color=( + dom.RGBA.from_json(json["eventTargetColor"]) + if json.get("eventTargetColor", None) is not None + else None + ), + shape_color=( + dom.RGBA.from_json(json["shapeColor"]) + if json.get("shapeColor", None) is not 
None + else None + ), + shape_margin_color=( + dom.RGBA.from_json(json["shapeMarginColor"]) + if json.get("shapeMarginColor", None) is not None + else None + ), + css_grid_color=( + dom.RGBA.from_json(json["cssGridColor"]) + if json.get("cssGridColor", None) is not None + else None + ), + color_format=( + ColorFormat.from_json(json["colorFormat"]) + if json.get("colorFormat", None) is not None + else None + ), + grid_highlight_config=( + GridHighlightConfig.from_json(json["gridHighlightConfig"]) + if json.get("gridHighlightConfig", None) is not None + else None + ), + flex_container_highlight_config=( + FlexContainerHighlightConfig.from_json( + json["flexContainerHighlightConfig"] + ) + if json.get("flexContainerHighlightConfig", None) is not None + else None + ), + flex_item_highlight_config=( + FlexItemHighlightConfig.from_json(json["flexItemHighlightConfig"]) + if json.get("flexItemHighlightConfig", None) is not None + else None + ), + contrast_algorithm=( + ContrastAlgorithm.from_json(json["contrastAlgorithm"]) + if json.get("contrastAlgorithm", None) is not None + else None + ), + container_query_container_highlight_config=( + ContainerQueryContainerHighlightConfig.from_json( + json["containerQueryContainerHighlightConfig"] + ) + if json.get("containerQueryContainerHighlightConfig", None) is not None + else None + ), + )
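Most Overlay commands further down take a HighlightConfig; every field is optional and only the fields that are set are serialised. A minimal sketch that shows the info tooltip and rulers and tints the content/padding/margin boxes (the colour values are arbitrary examples):

    from nodriver.cdp import dom, overlay

    highlight = overlay.HighlightConfig(
        show_info=True,
        show_rulers=True,
        content_color=dom.RGBA(r=111, g=168, b=220, a=0.66),
        padding_color=dom.RGBA(r=147, g=196, b=125, a=0.55),
        margin_color=dom.RGBA(r=246, g=178, b=107, a=0.66),
        contrast_algorithm=overlay.ContrastAlgorithm.APCA,
    )
    print(highlight.to_json())   # camelCase keys, ready to be sent as command params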
+ + + +
+[docs] +class ColorFormat(enum.Enum): + RGB = "rgb" + HSL = "hsl" + HWB = "hwb" + HEX_ = "hex" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ColorFormat: + return cls(json)
+ + + +
+[docs] +@dataclass +class GridNodeHighlightConfig: + """ + Configurations for Persistent Grid Highlight + """ + + #: A descriptor for the highlight appearance. + grid_highlight_config: GridHighlightConfig + + #: Identifier of the node to highlight. + node_id: dom.NodeId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["gridHighlightConfig"] = self.grid_highlight_config.to_json() + json["nodeId"] = self.node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> GridNodeHighlightConfig: + return cls( + grid_highlight_config=GridHighlightConfig.from_json( + json["gridHighlightConfig"] + ), + node_id=dom.NodeId.from_json(json["nodeId"]), + )
+ + + +
+[docs] +@dataclass +class FlexNodeHighlightConfig: + #: A descriptor for the highlight appearance of flex containers. + flex_container_highlight_config: FlexContainerHighlightConfig + + #: Identifier of the node to highlight. + node_id: dom.NodeId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["flexContainerHighlightConfig"] = ( + self.flex_container_highlight_config.to_json() + ) + json["nodeId"] = self.node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FlexNodeHighlightConfig: + return cls( + flex_container_highlight_config=FlexContainerHighlightConfig.from_json( + json["flexContainerHighlightConfig"] + ), + node_id=dom.NodeId.from_json(json["nodeId"]), + )
+ + + +
+[docs] +@dataclass +class ScrollSnapContainerHighlightConfig: + #: The style of the snapport border (default: transparent) + snapport_border: typing.Optional[LineStyle] = None + + #: The style of the snap area border (default: transparent) + snap_area_border: typing.Optional[LineStyle] = None + + #: The margin highlight fill color (default: transparent). + scroll_margin_color: typing.Optional[dom.RGBA] = None + + #: The padding highlight fill color (default: transparent). + scroll_padding_color: typing.Optional[dom.RGBA] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.snapport_border is not None: + json["snapportBorder"] = self.snapport_border.to_json() + if self.snap_area_border is not None: + json["snapAreaBorder"] = self.snap_area_border.to_json() + if self.scroll_margin_color is not None: + json["scrollMarginColor"] = self.scroll_margin_color.to_json() + if self.scroll_padding_color is not None: + json["scrollPaddingColor"] = self.scroll_padding_color.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScrollSnapContainerHighlightConfig: + return cls( + snapport_border=( + LineStyle.from_json(json["snapportBorder"]) + if json.get("snapportBorder", None) is not None + else None + ), + snap_area_border=( + LineStyle.from_json(json["snapAreaBorder"]) + if json.get("snapAreaBorder", None) is not None + else None + ), + scroll_margin_color=( + dom.RGBA.from_json(json["scrollMarginColor"]) + if json.get("scrollMarginColor", None) is not None + else None + ), + scroll_padding_color=( + dom.RGBA.from_json(json["scrollPaddingColor"]) + if json.get("scrollPaddingColor", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ScrollSnapHighlightConfig: + #: A descriptor for the highlight appearance of scroll snap containers. + scroll_snap_container_highlight_config: ScrollSnapContainerHighlightConfig + + #: Identifier of the node to highlight. + node_id: dom.NodeId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["scrollSnapContainerHighlightConfig"] = ( + self.scroll_snap_container_highlight_config.to_json() + ) + json["nodeId"] = self.node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScrollSnapHighlightConfig: + return cls( + scroll_snap_container_highlight_config=ScrollSnapContainerHighlightConfig.from_json( + json["scrollSnapContainerHighlightConfig"] + ), + node_id=dom.NodeId.from_json(json["nodeId"]), + )
+ + + +
+[docs] +@dataclass +class HingeConfig: + """ + Configuration for dual screen hinge + """ + + #: A rectangle represent hinge + rect: dom.Rect + + #: The content box highlight fill color (default: a dark color). + content_color: typing.Optional[dom.RGBA] = None + + #: The content box highlight outline color (default: transparent). + outline_color: typing.Optional[dom.RGBA] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["rect"] = self.rect.to_json() + if self.content_color is not None: + json["contentColor"] = self.content_color.to_json() + if self.outline_color is not None: + json["outlineColor"] = self.outline_color.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> HingeConfig: + return cls( + rect=dom.Rect.from_json(json["rect"]), + content_color=( + dom.RGBA.from_json(json["contentColor"]) + if json.get("contentColor", None) is not None + else None + ), + outline_color=( + dom.RGBA.from_json(json["outlineColor"]) + if json.get("outlineColor", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class WindowControlsOverlayConfig: + """ + Configuration for Window Controls Overlay + """ + + #: Whether the title bar CSS should be shown when emulating the Window Controls Overlay. + show_css: bool + + #: Selected platforms to show the overlay. + selected_platform: str + + #: The theme color defined in app manifest. + theme_color: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["showCSS"] = self.show_css + json["selectedPlatform"] = self.selected_platform + json["themeColor"] = self.theme_color + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WindowControlsOverlayConfig: + return cls( + show_css=bool(json["showCSS"]), + selected_platform=str(json["selectedPlatform"]), + theme_color=str(json["themeColor"]), + )
+ + + +
+[docs] +@dataclass +class ContainerQueryHighlightConfig: + #: A descriptor for the highlight appearance of container query containers. + container_query_container_highlight_config: ContainerQueryContainerHighlightConfig + + #: Identifier of the container node to highlight. + node_id: dom.NodeId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["containerQueryContainerHighlightConfig"] = ( + self.container_query_container_highlight_config.to_json() + ) + json["nodeId"] = self.node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ContainerQueryHighlightConfig: + return cls( + container_query_container_highlight_config=ContainerQueryContainerHighlightConfig.from_json( + json["containerQueryContainerHighlightConfig"] + ), + node_id=dom.NodeId.from_json(json["nodeId"]), + )
+ + + +
+[docs] +@dataclass +class ContainerQueryContainerHighlightConfig: + #: The style of the container border. + container_border: typing.Optional[LineStyle] = None + + #: The style of the descendants' borders. + descendant_border: typing.Optional[LineStyle] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.container_border is not None: + json["containerBorder"] = self.container_border.to_json() + if self.descendant_border is not None: + json["descendantBorder"] = self.descendant_border.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ContainerQueryContainerHighlightConfig: + return cls( + container_border=( + LineStyle.from_json(json["containerBorder"]) + if json.get("containerBorder", None) is not None + else None + ), + descendant_border=( + LineStyle.from_json(json["descendantBorder"]) + if json.get("descendantBorder", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class IsolatedElementHighlightConfig: + #: A descriptor for the highlight appearance of an element in isolation mode. + isolation_mode_highlight_config: IsolationModeHighlightConfig + + #: Identifier of the isolated element to highlight. + node_id: dom.NodeId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["isolationModeHighlightConfig"] = ( + self.isolation_mode_highlight_config.to_json() + ) + json["nodeId"] = self.node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> IsolatedElementHighlightConfig: + return cls( + isolation_mode_highlight_config=IsolationModeHighlightConfig.from_json( + json["isolationModeHighlightConfig"] + ), + node_id=dom.NodeId.from_json(json["nodeId"]), + )
+ + + +
+[docs] +@dataclass +class IsolationModeHighlightConfig: + #: The fill color of the resizers (default: transparent). + resizer_color: typing.Optional[dom.RGBA] = None + + #: The fill color for resizer handles (default: transparent). + resizer_handle_color: typing.Optional[dom.RGBA] = None + + #: The fill color for the mask covering non-isolated elements (default: transparent). + mask_color: typing.Optional[dom.RGBA] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.resizer_color is not None: + json["resizerColor"] = self.resizer_color.to_json() + if self.resizer_handle_color is not None: + json["resizerHandleColor"] = self.resizer_handle_color.to_json() + if self.mask_color is not None: + json["maskColor"] = self.mask_color.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> IsolationModeHighlightConfig: + return cls( + resizer_color=( + dom.RGBA.from_json(json["resizerColor"]) + if json.get("resizerColor", None) is not None + else None + ), + resizer_handle_color=( + dom.RGBA.from_json(json["resizerHandleColor"]) + if json.get("resizerHandleColor", None) is not None + else None + ), + mask_color=( + dom.RGBA.from_json(json["maskColor"]) + if json.get("maskColor", None) is not None + else None + ), + )
+ + + +
+[docs] +class InspectMode(enum.Enum): + SEARCH_FOR_NODE = "searchForNode" + SEARCH_FOR_UA_SHADOW_DOM = "searchForUAShadowDOM" + CAPTURE_AREA_SCREENSHOT = "captureAreaScreenshot" + SHOW_DISTANCES = "showDistances" + NONE = "none" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> InspectMode: + return cls(json)
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Overlay.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Overlay.enable", + } + json = yield cmd_dict
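Note that enable()/disable(), like every command in these generated modules, perform no I/O themselves: calling one returns a generator that yields a single command dict; the CDP client sends that dict over the websocket and feeds the JSON response back into the generator, which parses any return value. In nodriver this plumbing is hidden behind the tab/connection send() coroutine. A sketch, assuming nodriver's start(), get(), loop() and send() helpers:

    import nodriver
    from nodriver.cdp import dom, overlay

    async def main():
        browser = await nodriver.start()
        tab = await browser.get("https://example.com")
        # Overlay builds on the DOM domain, so that is commonly enabled first
        await tab.send(dom.enable())
        await tab.send(overlay.enable())
        # ... issue highlight commands here ...
        await tab.send(overlay.disable())

    if __name__ == "__main__":
        nodriver.loop().run_until_complete(main())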
+ + + +
+[docs] +def get_highlight_object_for_test( + node_id: dom.NodeId, + include_distance: typing.Optional[bool] = None, + include_style: typing.Optional[bool] = None, + color_format: typing.Optional[ColorFormat] = None, + show_accessibility_info: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, dict]: + """ + For testing. + + :param node_id: Id of the node to get highlight object for. + :param include_distance: *(Optional)* Whether to include distance info. + :param include_style: *(Optional)* Whether to include style info. + :param color_format: *(Optional)* The color format to get config with (default: hex). + :param show_accessibility_info: *(Optional)* Whether to show accessibility info (default: true). + :returns: Highlight data for the node. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + if include_distance is not None: + params["includeDistance"] = include_distance + if include_style is not None: + params["includeStyle"] = include_style + if color_format is not None: + params["colorFormat"] = color_format.to_json() + if show_accessibility_info is not None: + params["showAccessibilityInfo"] = show_accessibility_info + cmd_dict: T_JSON_DICT = { + "method": "Overlay.getHighlightObjectForTest", + "params": params, + } + json = yield cmd_dict + return dict(json["highlight"])
+ + + +
+[docs] +def get_grid_highlight_objects_for_test( + node_ids: typing.List[dom.NodeId], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, dict]: + """ + For Persistent Grid testing. + + :param node_ids: Ids of the node to get highlight object for. + :returns: Grid Highlight data for the node ids provided. + """ + params: T_JSON_DICT = dict() + params["nodeIds"] = [i.to_json() for i in node_ids] + cmd_dict: T_JSON_DICT = { + "method": "Overlay.getGridHighlightObjectsForTest", + "params": params, + } + json = yield cmd_dict + return dict(json["highlights"])
+ + + +
+[docs] +def get_source_order_highlight_object_for_test( + node_id: dom.NodeId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, dict]: + """ + For Source Order Viewer testing. + + :param node_id: Id of the node to highlight. + :returns: Source order highlight data for the node id provided. + """ + params: T_JSON_DICT = dict() + params["nodeId"] = node_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Overlay.getSourceOrderHighlightObjectForTest", + "params": params, + } + json = yield cmd_dict + return dict(json["highlight"])
+ + + +
+[docs] +def hide_highlight() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Hides any highlight. + """ + cmd_dict: T_JSON_DICT = { + "method": "Overlay.hideHighlight", + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def highlight_frame( + frame_id: page.FrameId, + content_color: typing.Optional[dom.RGBA] = None, + content_outline_color: typing.Optional[dom.RGBA] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlights owner element of the frame with given id. + Deprecated: Doesn't work reliably and cannot be fixed due to process + separation (the owner node might be in a different process). Determine + the owner node in the client and use highlightNode. + + .. deprecated:: 1.3 + + :param frame_id: Identifier of the frame to highlight. + :param content_color: *(Optional)* The content box highlight fill color (default: transparent). + :param content_outline_color: *(Optional)* The content box highlight outline color (default: transparent). + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + if content_color is not None: + params["contentColor"] = content_color.to_json() + if content_outline_color is not None: + params["contentOutlineColor"] = content_outline_color.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Overlay.highlightFrame", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def highlight_node( + highlight_config: HighlightConfig, + node_id: typing.Optional[dom.NodeId] = None, + backend_node_id: typing.Optional[dom.BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, + selector: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlights DOM node with given id or with the given JavaScript object wrapper. Either nodeId or + objectId must be specified. + + :param highlight_config: A descriptor for the highlight appearance. + :param node_id: *(Optional)* Identifier of the node to highlight. + :param backend_node_id: *(Optional)* Identifier of the backend node to highlight. + :param object_id: *(Optional)* JavaScript object id of the node to be highlighted. + :param selector: *(Optional)* Selectors to highlight relevant nodes. + """ + params: T_JSON_DICT = dict() + params["highlightConfig"] = highlight_config.to_json() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + if selector is not None: + params["selector"] = selector + cmd_dict: T_JSON_DICT = { + "method": "Overlay.highlightNode", + "params": params, + } + json = yield cmd_dict
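A sketch of highlighting the first element matching a CSS selector: resolve a node id via the DOM domain, then pass it together with a HighlightConfig (tab.send() is nodriver's command driver and is assumed here):

    from nodriver.cdp import dom, overlay

    async def highlight_first_match(tab, css_selector: str):
        # resolve a DOM node id for the selector
        root = await tab.send(dom.get_document())
        node_id = await tab.send(dom.query_selector(node_id=root.node_id, selector=css_selector))
        # paint it with a translucent content colour and the info tooltip
        config = overlay.HighlightConfig(
            show_info=True,
            content_color=dom.RGBA(r=111, g=168, b=220, a=0.66),
        )
        await tab.send(overlay.highlight_node(highlight_config=config, node_id=node_id))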
+ + + +
+[docs] +def highlight_quad( + quad: dom.Quad, + color: typing.Optional[dom.RGBA] = None, + outline_color: typing.Optional[dom.RGBA] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlights given quad. Coordinates are absolute with respect to the main frame viewport. + + :param quad: Quad to highlight + :param color: *(Optional)* The highlight fill color (default: transparent). + :param outline_color: *(Optional)* The highlight outline color (default: transparent). + """ + params: T_JSON_DICT = dict() + params["quad"] = quad.to_json() + if color is not None: + params["color"] = color.to_json() + if outline_color is not None: + params["outlineColor"] = outline_color.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Overlay.highlightQuad", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def highlight_rect( + x: int, + y: int, + width: int, + height: int, + color: typing.Optional[dom.RGBA] = None, + outline_color: typing.Optional[dom.RGBA] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlights given rectangle. Coordinates are absolute with respect to the main frame viewport. + + :param x: X coordinate + :param y: Y coordinate + :param width: Rectangle width + :param height: Rectangle height + :param color: *(Optional)* The highlight fill color (default: transparent). + :param outline_color: *(Optional)* The highlight outline color (default: transparent). + """ + params: T_JSON_DICT = dict() + params["x"] = x + params["y"] = y + params["width"] = width + params["height"] = height + if color is not None: + params["color"] = color.to_json() + if outline_color is not None: + params["outlineColor"] = outline_color.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Overlay.highlightRect", + "params": params, + } + json = yield cmd_dict
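highlight_rect() needs no node lookup at all, which makes it handy for quick visual checks of viewport coordinates. A sketch, again assuming nodriver's tab.send():

    from nodriver.cdp import dom, overlay

    async def flash_region(tab):
        await tab.send(overlay.highlight_rect(
            x=100, y=200, width=300, height=150,
            color=dom.RGBA(r=255, g=0, b=0, a=0.3),
            outline_color=dom.RGBA(r=255, g=0, b=0, a=1),
        ))
        # Overlay.hideHighlight removes it again
        await tab.send(overlay.hide_highlight())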
+ + + +
+[docs] +def highlight_source_order( + source_order_config: SourceOrderConfig, + node_id: typing.Optional[dom.NodeId] = None, + backend_node_id: typing.Optional[dom.BackendNodeId] = None, + object_id: typing.Optional[runtime.RemoteObjectId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlights the source order of the children of the DOM node with given id or with the given + JavaScript object wrapper. Either nodeId or objectId must be specified. + + :param source_order_config: A descriptor for the appearance of the overlay drawing. + :param node_id: *(Optional)* Identifier of the node to highlight. + :param backend_node_id: *(Optional)* Identifier of the backend node to highlight. + :param object_id: *(Optional)* JavaScript object id of the node to be highlighted. + """ + params: T_JSON_DICT = dict() + params["sourceOrderConfig"] = source_order_config.to_json() + if node_id is not None: + params["nodeId"] = node_id.to_json() + if backend_node_id is not None: + params["backendNodeId"] = backend_node_id.to_json() + if object_id is not None: + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Overlay.highlightSourceOrder", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_inspect_mode( + mode: InspectMode, highlight_config: typing.Optional[HighlightConfig] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enters the 'inspect' mode. In this mode, elements that user is hovering over are highlighted. + Backend then generates 'inspectNodeRequested' event upon element selection. + + :param mode: Set an inspection mode. + :param highlight_config: *(Optional)* A descriptor for the highlight appearance of hovered-over nodes. May be omitted if ```enabled == false```. + """ + params: T_JSON_DICT = dict() + params["mode"] = mode.to_json() + if highlight_config is not None: + params["highlightConfig"] = highlight_config.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setInspectMode", + "params": params, + } + json = yield cmd_dict
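Entering inspect mode is a two-step affair: choose a mode plus a hover highlight style here, then listen for the Overlay.inspectNodeRequested event (defined near the end of this module) to learn which element was picked. A sketch of the command half, assuming nodriver's tab.send():

    from nodriver.cdp import dom, overlay

    async def start_picking(tab):
        config = overlay.HighlightConfig(
            show_info=True,
            content_color=dom.RGBA(r=155, g=196, b=125, a=0.5),
        )
        await tab.send(overlay.set_inspect_mode(
            mode=overlay.InspectMode.SEARCH_FOR_NODE,
            highlight_config=config,
        ))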
+ + + +
+[docs] +def set_show_ad_highlights( + show: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlights owner element of all frames detected to be ads. + + :param show: True for showing ad highlights + """ + params: T_JSON_DICT = dict() + params["show"] = show + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowAdHighlights", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_paused_in_debugger_message( + message: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param message: *(Optional)* The message to display, also triggers resume and step over controls. + """ + params: T_JSON_DICT = dict() + if message is not None: + params["message"] = message + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setPausedInDebuggerMessage", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_debug_borders( + show: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Requests that backend shows debug borders on layers + + :param show: True for showing debug borders + """ + params: T_JSON_DICT = dict() + params["show"] = show + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowDebugBorders", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_fps_counter( + show: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Requests that backend shows the FPS counter + + :param show: True for showing the FPS counter + """ + params: T_JSON_DICT = dict() + params["show"] = show + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowFPSCounter", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_grid_overlays( + grid_node_highlight_configs: typing.List[GridNodeHighlightConfig], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Highlight multiple elements with the CSS Grid overlay. + + :param grid_node_highlight_configs: An array of node identifiers and descriptors for the highlight appearance. + """ + params: T_JSON_DICT = dict() + params["gridNodeHighlightConfigs"] = [ + i.to_json() for i in grid_node_highlight_configs + ] + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowGridOverlays", + "params": params, + } + json = yield cmd_dict
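A sketch of turning on persistent grid overlays for a handful of nodes at once; the GridHighlightConfig field names follow the dataclass defined earlier in this module, and tab.send() is the assumed nodriver driver:

    from nodriver.cdp import dom, overlay

    async def show_grid_overlays(tab, node_ids):
        grid_cfg = overlay.GridHighlightConfig(
            show_grid_extension_lines=True,
            row_line_color=dom.RGBA(r=127, g=32, b=210, a=0.8),
            column_line_color=dom.RGBA(r=127, g=32, b=210, a=0.8),
        )
        configs = [
            overlay.GridNodeHighlightConfig(grid_highlight_config=grid_cfg, node_id=nid)
            for nid in node_ids
        ]
        await tab.send(overlay.set_show_grid_overlays(grid_node_highlight_configs=configs))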
+ + + +
+[docs] +def set_show_flex_overlays( + flex_node_highlight_configs: typing.List[FlexNodeHighlightConfig], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param flex_node_highlight_configs: An array of node identifiers and descriptors for the highlight appearance. + """ + params: T_JSON_DICT = dict() + params["flexNodeHighlightConfigs"] = [ + i.to_json() for i in flex_node_highlight_configs + ] + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowFlexOverlays", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_scroll_snap_overlays( + scroll_snap_highlight_configs: typing.List[ScrollSnapHighlightConfig], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param scroll_snap_highlight_configs: An array of node identifiers and descriptors for the highlight appearance. + """ + params: T_JSON_DICT = dict() + params["scrollSnapHighlightConfigs"] = [ + i.to_json() for i in scroll_snap_highlight_configs + ] + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowScrollSnapOverlays", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_container_query_overlays( + container_query_highlight_configs: typing.List[ContainerQueryHighlightConfig], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param container_query_highlight_configs: An array of node identifiers and descriptors for the highlight appearance. + """ + params: T_JSON_DICT = dict() + params["containerQueryHighlightConfigs"] = [ + i.to_json() for i in container_query_highlight_configs + ] + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowContainerQueryOverlays", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_paint_rects( + result: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Requests that backend shows paint rectangles + + :param result: True for showing paint rectangles + """ + params: T_JSON_DICT = dict() + params["result"] = result + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowPaintRects", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_layout_shift_regions( + result: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Requests that backend shows layout shift regions + + :param result: True for showing layout shift regions + """ + params: T_JSON_DICT = dict() + params["result"] = result + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowLayoutShiftRegions", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_scroll_bottleneck_rects( + show: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Requests that backend shows scroll bottleneck rects + + :param show: True for showing scroll bottleneck rects + """ + params: T_JSON_DICT = dict() + params["show"] = show + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowScrollBottleneckRects", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_show_hit_test_borders( + show: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Deprecated, no longer has any effect. + + .. deprecated:: 1.3 + + :param show: True for showing hit-test borders + """ + params: T_JSON_DICT = dict() + params["show"] = show + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowHitTestBorders", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_web_vitals(show: bool) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Request that backend shows an overlay with web vital metrics. + + :param show: + """ + params: T_JSON_DICT = dict() + params["show"] = show + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowWebVitals", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_viewport_size_on_resize( + show: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Paints viewport size upon main frame resize. + + :param show: Whether to paint size or not. + """ + params: T_JSON_DICT = dict() + params["show"] = show + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowViewportSizeOnResize", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_hinge( + hinge_config: typing.Optional[HingeConfig] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Add a dual screen device hinge + + :param hinge_config: *(Optional)* hinge data, null means hideHinge + """ + params: T_JSON_DICT = dict() + if hinge_config is not None: + params["hingeConfig"] = hinge_config.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowHinge", + "params": params, + } + json = yield cmd_dict
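A sketch of painting a dual-screen hinge down the middle of a foldable-sized viewport and then hiding it again (the geometry is an arbitrary example; tab.send() is the assumed nodriver driver):

    from nodriver.cdp import dom, overlay

    async def toggle_hinge(tab):
        hinge = overlay.HingeConfig(
            rect=dom.Rect(x=390.0, y=0.0, width=20.0, height=844.0),
            content_color=dom.RGBA(r=38, g=38, b=38, a=1),
        )
        await tab.send(overlay.set_show_hinge(hinge_config=hinge))
        # calling the command without a config hides the hinge again
        await tab.send(overlay.set_show_hinge())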
+ + + +
+[docs] +def set_show_isolated_elements( + isolated_element_highlight_configs: typing.List[IsolatedElementHighlightConfig], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Show elements in isolation mode with overlays. + + :param isolated_element_highlight_configs: An array of node identifiers and descriptors for the highlight appearance. + """ + params: T_JSON_DICT = dict() + params["isolatedElementHighlightConfigs"] = [ + i.to_json() for i in isolated_element_highlight_configs + ] + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowIsolatedElements", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_show_window_controls_overlay( + window_controls_overlay_config: typing.Optional[WindowControlsOverlayConfig] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Show Window Controls Overlay for PWA + + :param window_controls_overlay_config: *(Optional)* Window Controls Overlay data, null means hide Window Controls Overlay + """ + params: T_JSON_DICT = dict() + if window_controls_overlay_config is not None: + params["windowControlsOverlayConfig"] = window_controls_overlay_config.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Overlay.setShowWindowControlsOverlay", + "params": params, + } + json = yield cmd_dict
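A sketch of previewing a PWA's Window Controls Overlay title bar; the platform string and theme colour below are illustrative values only, and tab.send() is the assumed nodriver driver:

    from nodriver.cdp import overlay

    async def preview_window_controls(tab):
        woc = overlay.WindowControlsOverlayConfig(
            show_css=True,
            selected_platform="Windows",
            theme_color="#1f1f1f",
        )
        await tab.send(overlay.set_show_window_controls_overlay(
            window_controls_overlay_config=woc,
        ))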
+ + + +
+[docs] +@event_class("Overlay.inspectNodeRequested") +@dataclass +class InspectNodeRequested: + """ + Fired when the node should be inspected. This happens after call to ``setInspectMode`` or when + user manually inspects an element. + """ + + #: Id of the node to inspect. + backend_node_id: dom.BackendNodeId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InspectNodeRequested: + return cls(backend_node_id=dom.BackendNodeId.from_json(json["backendNodeId"]))
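Event classes such as InspectNodeRequested are what incoming Overlay.* notifications get deserialised into. In nodriver the usual pattern is to register a callback per event class; the add_handler() registration and its callback signature are assumptions about that wrapper:

    from nodriver.cdp import overlay

    def watch_inspect_requests(tab):
        # the callback receives the parsed dataclass, not raw JSON
        async def on_inspect_requested(event: overlay.InspectNodeRequested):
            print("user picked backend node", event.backend_node_id)

        # register before calling set_inspect_mode so no event is missed
        tab.add_handler(overlay.InspectNodeRequested, on_inspect_requested)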
+ + + +
+[docs] +@event_class("Overlay.nodeHighlightRequested") +@dataclass +class NodeHighlightRequested: + """ + Fired when the node should be highlighted. This happens after call to ``setInspectMode``. + """ + + node_id: dom.NodeId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> NodeHighlightRequested: + return cls(node_id=dom.NodeId.from_json(json["nodeId"]))
+ + + +
+[docs] +@event_class("Overlay.screenshotRequested") +@dataclass +class ScreenshotRequested: + """ + Fired when user asks to capture screenshot of some area on the page. + """ + + #: Viewport to capture, in device independent pixels (dip). + viewport: page.Viewport + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScreenshotRequested: + return cls(viewport=page.Viewport.from_json(json["viewport"]))
+ + + +
+[docs] +@event_class("Overlay.inspectModeCanceled") +@dataclass +class InspectModeCanceled: + """ + Fired when user cancels the inspect mode. + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InspectModeCanceled: + return cls()
diff --git a/docs/_build/html/_modules/nodriver/cdp/page.html b/docs/_build/html/_modules/nodriver/cdp/page.html
new file mode 100644
index 0000000..7d556c2
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/page.html
@@ -0,0 +1,4894 @@
+ nodriver.cdp.page - nodriver documentation

Source code for nodriver.cdp.page

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Page
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import debugger
+from . import dom
+from . import emulation
+from . import io
+from . import network
+from . import runtime
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +class FrameId(str): + """ + Unique frame identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> FrameId: + return cls(json) + + def __repr__(self): + return "FrameId({})".format(super().__repr__())
+ + + +
+[docs] +class AdFrameType(enum.Enum): + """ + Indicates whether a frame has been identified as an ad. + """ + + NONE = "none" + CHILD = "child" + ROOT = "root" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AdFrameType: + return cls(json)
+ + + +
+[docs] +class AdFrameExplanation(enum.Enum): + PARENT_IS_AD = "ParentIsAd" + CREATED_BY_AD_SCRIPT = "CreatedByAdScript" + MATCHED_BLOCKING_RULE = "MatchedBlockingRule" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AdFrameExplanation: + return cls(json)
+ + + +
+[docs] +@dataclass +class AdFrameStatus: + """ + Indicates whether a frame has been identified as an ad and why. + """ + + ad_frame_type: AdFrameType + + explanations: typing.Optional[typing.List[AdFrameExplanation]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["adFrameType"] = self.ad_frame_type.to_json() + if self.explanations is not None: + json["explanations"] = [i.to_json() for i in self.explanations] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AdFrameStatus: + return cls( + ad_frame_type=AdFrameType.from_json(json["adFrameType"]), + explanations=( + [AdFrameExplanation.from_json(i) for i in json["explanations"]] + if json.get("explanations", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class AdScriptId: + """ + Identifies the bottom-most script which caused the frame to be labelled + as an ad. + """ + + #: Script Id of the bottom-most script which caused the frame to be labelled + #: as an ad. + script_id: runtime.ScriptId + + #: Id of adScriptId's debugger. + debugger_id: runtime.UniqueDebuggerId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["scriptId"] = self.script_id.to_json() + json["debuggerId"] = self.debugger_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AdScriptId: + return cls( + script_id=runtime.ScriptId.from_json(json["scriptId"]), + debugger_id=runtime.UniqueDebuggerId.from_json(json["debuggerId"]), + )
+ + + +
+[docs] +class SecureContextType(enum.Enum): + """ + Indicates whether the frame is a secure context and why it is the case. + """ + + SECURE = "Secure" + SECURE_LOCALHOST = "SecureLocalhost" + INSECURE_SCHEME = "InsecureScheme" + INSECURE_ANCESTOR = "InsecureAncestor" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SecureContextType: + return cls(json)
+ + + +
+[docs] +class CrossOriginIsolatedContextType(enum.Enum): + """ + Indicates whether the frame is cross-origin isolated and why it is the case. + """ + + ISOLATED = "Isolated" + NOT_ISOLATED = "NotIsolated" + NOT_ISOLATED_FEATURE_DISABLED = "NotIsolatedFeatureDisabled" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CrossOriginIsolatedContextType: + return cls(json)
+ + + +
+[docs] +class GatedAPIFeatures(enum.Enum): + SHARED_ARRAY_BUFFERS = "SharedArrayBuffers" + SHARED_ARRAY_BUFFERS_TRANSFER_ALLOWED = "SharedArrayBuffersTransferAllowed" + PERFORMANCE_MEASURE_MEMORY = "PerformanceMeasureMemory" + PERFORMANCE_PROFILE = "PerformanceProfile" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> GatedAPIFeatures: + return cls(json)
+ + + +
+[docs] +class PermissionsPolicyFeature(enum.Enum): + """ + All Permissions Policy features. This enum should match the one defined + in third_party/blink/renderer/core/permissions_policy/permissions_policy_features.json5. + """ + + ACCELEROMETER = "accelerometer" + AMBIENT_LIGHT_SENSOR = "ambient-light-sensor" + ATTRIBUTION_REPORTING = "attribution-reporting" + AUTOPLAY = "autoplay" + BLUETOOTH = "bluetooth" + BROWSING_TOPICS = "browsing-topics" + CAMERA = "camera" + CAPTURED_SURFACE_CONTROL = "captured-surface-control" + CH_DPR = "ch-dpr" + CH_DEVICE_MEMORY = "ch-device-memory" + CH_DOWNLINK = "ch-downlink" + CH_ECT = "ch-ect" + CH_PREFERS_COLOR_SCHEME = "ch-prefers-color-scheme" + CH_PREFERS_REDUCED_MOTION = "ch-prefers-reduced-motion" + CH_PREFERS_REDUCED_TRANSPARENCY = "ch-prefers-reduced-transparency" + CH_RTT = "ch-rtt" + CH_SAVE_DATA = "ch-save-data" + CH_UA = "ch-ua" + CH_UA_ARCH = "ch-ua-arch" + CH_UA_BITNESS = "ch-ua-bitness" + CH_UA_PLATFORM = "ch-ua-platform" + CH_UA_MODEL = "ch-ua-model" + CH_UA_MOBILE = "ch-ua-mobile" + CH_UA_FORM_FACTORS = "ch-ua-form-factors" + CH_UA_FULL_VERSION = "ch-ua-full-version" + CH_UA_FULL_VERSION_LIST = "ch-ua-full-version-list" + CH_UA_PLATFORM_VERSION = "ch-ua-platform-version" + CH_UA_WOW64 = "ch-ua-wow64" + CH_VIEWPORT_HEIGHT = "ch-viewport-height" + CH_VIEWPORT_WIDTH = "ch-viewport-width" + CH_WIDTH = "ch-width" + CLIPBOARD_READ = "clipboard-read" + CLIPBOARD_WRITE = "clipboard-write" + COMPUTE_PRESSURE = "compute-pressure" + CROSS_ORIGIN_ISOLATED = "cross-origin-isolated" + DEFERRED_FETCH = "deferred-fetch" + DIRECT_SOCKETS = "direct-sockets" + DISPLAY_CAPTURE = "display-capture" + DOCUMENT_DOMAIN = "document-domain" + ENCRYPTED_MEDIA = "encrypted-media" + EXECUTION_WHILE_OUT_OF_VIEWPORT = "execution-while-out-of-viewport" + EXECUTION_WHILE_NOT_RENDERED = "execution-while-not-rendered" + FOCUS_WITHOUT_USER_ACTIVATION = "focus-without-user-activation" + FULLSCREEN = "fullscreen" + FROBULATE = "frobulate" + GAMEPAD = "gamepad" + GEOLOCATION = "geolocation" + GYROSCOPE = "gyroscope" + HID = "hid" + IDENTITY_CREDENTIALS_GET = "identity-credentials-get" + IDLE_DETECTION = "idle-detection" + INTEREST_COHORT = "interest-cohort" + JOIN_AD_INTEREST_GROUP = "join-ad-interest-group" + KEYBOARD_MAP = "keyboard-map" + LOCAL_FONTS = "local-fonts" + MAGNETOMETER = "magnetometer" + MICROPHONE = "microphone" + MIDI = "midi" + OTP_CREDENTIALS = "otp-credentials" + PAYMENT = "payment" + PICTURE_IN_PICTURE = "picture-in-picture" + PRIVATE_AGGREGATION = "private-aggregation" + PRIVATE_STATE_TOKEN_ISSUANCE = "private-state-token-issuance" + PRIVATE_STATE_TOKEN_REDEMPTION = "private-state-token-redemption" + PUBLICKEY_CREDENTIALS_CREATE = "publickey-credentials-create" + PUBLICKEY_CREDENTIALS_GET = "publickey-credentials-get" + RUN_AD_AUCTION = "run-ad-auction" + SCREEN_WAKE_LOCK = "screen-wake-lock" + SERIAL = "serial" + SHARED_AUTOFILL = "shared-autofill" + SHARED_STORAGE = "shared-storage" + SHARED_STORAGE_SELECT_URL = "shared-storage-select-url" + SMART_CARD = "smart-card" + SPEAKER_SELECTION = "speaker-selection" + STORAGE_ACCESS = "storage-access" + SUB_APPS = "sub-apps" + SYNC_XHR = "sync-xhr" + UNLOAD = "unload" + USB = "usb" + USB_UNRESTRICTED = "usb-unrestricted" + VERTICAL_SCROLL = "vertical-scroll" + WEB_PRINTING = "web-printing" + WEB_SHARE = "web-share" + WINDOW_MANAGEMENT = "window-management" + XR_SPATIAL_TRACKING = "xr-spatial-tracking" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> 
PermissionsPolicyFeature: + return cls(json)
+ + + +
+[docs] +class PermissionsPolicyBlockReason(enum.Enum): + """ + Reason for a permissions policy feature to be disabled. + """ + + HEADER = "Header" + IFRAME_ATTRIBUTE = "IframeAttribute" + IN_FENCED_FRAME_TREE = "InFencedFrameTree" + IN_ISOLATED_APP = "InIsolatedApp" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PermissionsPolicyBlockReason: + return cls(json)
+ + + +
+[docs] +@dataclass +class PermissionsPolicyBlockLocator: + frame_id: FrameId + + block_reason: PermissionsPolicyBlockReason + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["frameId"] = self.frame_id.to_json() + json["blockReason"] = self.block_reason.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PermissionsPolicyBlockLocator: + return cls( + frame_id=FrameId.from_json(json["frameId"]), + block_reason=PermissionsPolicyBlockReason.from_json(json["blockReason"]), + )
+ + + +
+[docs] +@dataclass +class PermissionsPolicyFeatureState: + feature: PermissionsPolicyFeature + + allowed: bool + + locator: typing.Optional[PermissionsPolicyBlockLocator] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["feature"] = self.feature.to_json() + json["allowed"] = self.allowed + if self.locator is not None: + json["locator"] = self.locator.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PermissionsPolicyFeatureState: + return cls( + feature=PermissionsPolicyFeature.from_json(json["feature"]), + allowed=bool(json["allowed"]), + locator=( + PermissionsPolicyBlockLocator.from_json(json["locator"]) + if json.get("locator", None) is not None + else None + ), + )
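Each state entry names one feature and, when the feature is blocked, a locator explaining where the block comes from. A sketch of parsing a raw payload (the payload below is made-up example data shaped the way this class's from_json expects):

    from nodriver.cdp import page

    raw_states = [
        {"feature": "geolocation", "allowed": True},
        {"feature": "camera", "allowed": False,
         "locator": {"frameId": "F1A2B3", "blockReason": "IframeAttribute"}},
    ]
    states = [page.PermissionsPolicyFeatureState.from_json(s) for s in raw_states]
    blocked = {
        s.feature.value: s.locator.block_reason.value
        for s in states
        if not s.allowed and s.locator is not None
    }
    print(blocked)   # {'camera': 'IframeAttribute'}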
+ + + +
+[docs] +class OriginTrialTokenStatus(enum.Enum): + """ + Origin Trial(https://www.chromium.org/blink/origin-trials) support. + Status for an Origin Trial token. + """ + + SUCCESS = "Success" + NOT_SUPPORTED = "NotSupported" + INSECURE = "Insecure" + EXPIRED = "Expired" + WRONG_ORIGIN = "WrongOrigin" + INVALID_SIGNATURE = "InvalidSignature" + MALFORMED = "Malformed" + WRONG_VERSION = "WrongVersion" + FEATURE_DISABLED = "FeatureDisabled" + TOKEN_DISABLED = "TokenDisabled" + FEATURE_DISABLED_FOR_USER = "FeatureDisabledForUser" + UNKNOWN_TRIAL = "UnknownTrial" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> OriginTrialTokenStatus: + return cls(json)
+ + + +
+[docs] +class OriginTrialStatus(enum.Enum): + """ + Status for an Origin Trial. + """ + + ENABLED = "Enabled" + VALID_TOKEN_NOT_PROVIDED = "ValidTokenNotProvided" + OS_NOT_SUPPORTED = "OSNotSupported" + TRIAL_NOT_ALLOWED = "TrialNotAllowed" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> OriginTrialStatus: + return cls(json)
+ + + +
+[docs] +class OriginTrialUsageRestriction(enum.Enum): + NONE = "None" + SUBSET = "Subset" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> OriginTrialUsageRestriction: + return cls(json)
+ + + +
+[docs] +@dataclass +class OriginTrialToken: + origin: str + + match_sub_domains: bool + + trial_name: str + + expiry_time: network.TimeSinceEpoch + + is_third_party: bool + + usage_restriction: OriginTrialUsageRestriction + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["origin"] = self.origin + json["matchSubDomains"] = self.match_sub_domains + json["trialName"] = self.trial_name + json["expiryTime"] = self.expiry_time.to_json() + json["isThirdParty"] = self.is_third_party + json["usageRestriction"] = self.usage_restriction.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> OriginTrialToken: + return cls( + origin=str(json["origin"]), + match_sub_domains=bool(json["matchSubDomains"]), + trial_name=str(json["trialName"]), + expiry_time=network.TimeSinceEpoch.from_json(json["expiryTime"]), + is_third_party=bool(json["isThirdParty"]), + usage_restriction=OriginTrialUsageRestriction.from_json( + json["usageRestriction"] + ), + )
+ + + +
+[docs] +@dataclass +class OriginTrialTokenWithStatus: + raw_token_text: str + + status: OriginTrialTokenStatus + + #: ``parsedToken`` is present only when the token is extractable and + #: parsable. + parsed_token: typing.Optional[OriginTrialToken] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["rawTokenText"] = self.raw_token_text + json["status"] = self.status.to_json() + if self.parsed_token is not None: + json["parsedToken"] = self.parsed_token.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> OriginTrialTokenWithStatus: + return cls( + raw_token_text=str(json["rawTokenText"]), + status=OriginTrialTokenStatus.from_json(json["status"]), + parsed_token=( + OriginTrialToken.from_json(json["parsedToken"]) + if json.get("parsedToken", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class OriginTrial: + trial_name: str + + status: OriginTrialStatus + + tokens_with_status: typing.List[OriginTrialTokenWithStatus] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["trialName"] = self.trial_name + json["status"] = self.status.to_json() + json["tokensWithStatus"] = [i.to_json() for i in self.tokens_with_status] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> OriginTrial: + return cls( + trial_name=str(json["trialName"]), + status=OriginTrialStatus.from_json(json["status"]), + tokens_with_status=[ + OriginTrialTokenWithStatus.from_json(i) + for i in json["tokensWithStatus"] + ], + )
+ + + +
+[docs] +@dataclass +class Frame: + """ + Information about the Frame on the page. + """ + + #: Frame unique identifier. + id_: FrameId + + #: Identifier of the loader associated with this frame. + loader_id: network.LoaderId + + #: Frame document's URL without fragment. + url: str + + #: Frame document's registered domain, taking the public suffixes list into account. + #: Extracted from the Frame's url. + #: Example URLs: http://www.google.com/file.html -> "google.com" + #: http://a.b.co.uk/file.html -> "b.co.uk" + domain_and_registry: str + + #: Frame document's security origin. + security_origin: str + + #: Frame document's mimeType as determined by the browser. + mime_type: str + + #: Indicates whether the main document is a secure context and explains why that is the case. + secure_context_type: SecureContextType + + #: Indicates whether this is a cross origin isolated context. + cross_origin_isolated_context_type: CrossOriginIsolatedContextType + + #: Indicated which gated APIs / features are available. + gated_api_features: typing.List[GatedAPIFeatures] + + #: Parent frame identifier. + parent_id: typing.Optional[FrameId] = None + + #: Frame's name as specified in the tag. + name: typing.Optional[str] = None + + #: Frame document's URL fragment including the '#'. + url_fragment: typing.Optional[str] = None + + #: If the frame failed to load, this contains the URL that could not be loaded. Note that unlike url above, this URL may contain a fragment. + unreachable_url: typing.Optional[str] = None + + #: Indicates whether this frame was tagged as an ad and why. + ad_frame_status: typing.Optional[AdFrameStatus] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_.to_json() + json["loaderId"] = self.loader_id.to_json() + json["url"] = self.url + json["domainAndRegistry"] = self.domain_and_registry + json["securityOrigin"] = self.security_origin + json["mimeType"] = self.mime_type + json["secureContextType"] = self.secure_context_type.to_json() + json["crossOriginIsolatedContextType"] = ( + self.cross_origin_isolated_context_type.to_json() + ) + json["gatedAPIFeatures"] = [i.to_json() for i in self.gated_api_features] + if self.parent_id is not None: + json["parentId"] = self.parent_id.to_json() + if self.name is not None: + json["name"] = self.name + if self.url_fragment is not None: + json["urlFragment"] = self.url_fragment + if self.unreachable_url is not None: + json["unreachableUrl"] = self.unreachable_url + if self.ad_frame_status is not None: + json["adFrameStatus"] = self.ad_frame_status.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Frame: + return cls( + id_=FrameId.from_json(json["id"]), + loader_id=network.LoaderId.from_json(json["loaderId"]), + url=str(json["url"]), + domain_and_registry=str(json["domainAndRegistry"]), + security_origin=str(json["securityOrigin"]), + mime_type=str(json["mimeType"]), + secure_context_type=SecureContextType.from_json(json["secureContextType"]), + cross_origin_isolated_context_type=CrossOriginIsolatedContextType.from_json( + json["crossOriginIsolatedContextType"] + ), + gated_api_features=[ + GatedAPIFeatures.from_json(i) for i in json["gatedAPIFeatures"] + ], + parent_id=( + FrameId.from_json(json["parentId"]) + if json.get("parentId", None) is not None + else None + ), + name=str(json["name"]) if json.get("name", None) is not None else None, + url_fragment=( + str(json["urlFragment"]) + if json.get("urlFragment", None) is not None + else None + ), + 
unreachable_url=( + str(json["unreachableUrl"]) + if json.get("unreachableUrl", None) is not None + else None + ), + ad_frame_status=( + AdFrameStatus.from_json(json["adFrameStatus"]) + if json.get("adFrameStatus", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class FrameResource: + """ + Information about the Resource on the page. + """ + + #: Resource URL. + url: str + + #: Type of this resource. + type_: network.ResourceType + + #: Resource mimeType as determined by the browser. + mime_type: str + + #: last-modified timestamp as reported by server. + last_modified: typing.Optional[network.TimeSinceEpoch] = None + + #: Resource content size. + content_size: typing.Optional[float] = None + + #: True if the resource failed to load. + failed: typing.Optional[bool] = None + + #: True if the resource was canceled during loading. + canceled: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["type"] = self.type_.to_json() + json["mimeType"] = self.mime_type + if self.last_modified is not None: + json["lastModified"] = self.last_modified.to_json() + if self.content_size is not None: + json["contentSize"] = self.content_size + if self.failed is not None: + json["failed"] = self.failed + if self.canceled is not None: + json["canceled"] = self.canceled + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameResource: + return cls( + url=str(json["url"]), + type_=network.ResourceType.from_json(json["type"]), + mime_type=str(json["mimeType"]), + last_modified=( + network.TimeSinceEpoch.from_json(json["lastModified"]) + if json.get("lastModified", None) is not None + else None + ), + content_size=( + float(json["contentSize"]) + if json.get("contentSize", None) is not None + else None + ), + failed=( + bool(json["failed"]) if json.get("failed", None) is not None else None + ), + canceled=( + bool(json["canceled"]) + if json.get("canceled", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class FrameResourceTree: + """ + Information about the Frame hierarchy along with their cached resources. + """ + + #: Frame information for this tree item. + frame: Frame + + #: Information about frame resources. + resources: typing.List[FrameResource] + + #: Child frames. + child_frames: typing.Optional[typing.List[FrameResourceTree]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["frame"] = self.frame.to_json() + json["resources"] = [i.to_json() for i in self.resources] + if self.child_frames is not None: + json["childFrames"] = [i.to_json() for i in self.child_frames] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameResourceTree: + return cls( + frame=Frame.from_json(json["frame"]), + resources=[FrameResource.from_json(i) for i in json["resources"]], + child_frames=( + [FrameResourceTree.from_json(i) for i in json["childFrames"]] + if json.get("childFrames", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class FrameTree: + """ + Information about the Frame hierarchy. + """ + + #: Frame information for this tree item. + frame: Frame + + #: Child frames. + child_frames: typing.Optional[typing.List[FrameTree]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["frame"] = self.frame.to_json() + if self.child_frames is not None: + json["childFrames"] = [i.to_json() for i in self.child_frames] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameTree: + return cls( + frame=Frame.from_json(json["frame"]), + child_frames=( + [FrameTree.from_json(i) for i in json["childFrames"]] + if json.get("childFrames", None) is not None + else None + ), + )
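``FrameTree`` (like ``FrameResourceTree`` above) nests to arbitrary depth, with ``child_frames`` left as ``None`` for leaf frames. A small recursive helper, sketched here and not part of the module, flattens such a tree; ``get_frame_tree()`` further below returns the value to feed it:

def iter_frames(tree: FrameTree) -> typing.Iterator[Frame]:
    # Depth-first walk; child_frames is None (not an empty list) for leaves.
    yield tree.frame
    for child in tree.child_frames or []:
        yield from iter_frames(child)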
+ + + +
+[docs] +class ScriptIdentifier(str): + """ + Unique script identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> ScriptIdentifier: + return cls(json) + + def __repr__(self): + return "ScriptIdentifier({})".format(super().__repr__())
+ + + +
+[docs] +class TransitionType(enum.Enum): + """ + Transition type. + """ + + LINK = "link" + TYPED = "typed" + ADDRESS_BAR = "address_bar" + AUTO_BOOKMARK = "auto_bookmark" + AUTO_SUBFRAME = "auto_subframe" + MANUAL_SUBFRAME = "manual_subframe" + GENERATED = "generated" + AUTO_TOPLEVEL = "auto_toplevel" + FORM_SUBMIT = "form_submit" + RELOAD = "reload" + KEYWORD = "keyword" + KEYWORD_GENERATED = "keyword_generated" + OTHER = "other" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> TransitionType: + return cls(json)
+ + + + + + + +
+[docs] +@dataclass +class ScreencastFrameMetadata: + """ + Screencast frame metadata. + """ + + #: Top offset in DIP. + offset_top: float + + #: Page scale factor. + page_scale_factor: float + + #: Device screen width in DIP. + device_width: float + + #: Device screen height in DIP. + device_height: float + + #: Position of horizontal scroll in CSS pixels. + scroll_offset_x: float + + #: Position of vertical scroll in CSS pixels. + scroll_offset_y: float + + #: Frame swap timestamp. + timestamp: typing.Optional[network.TimeSinceEpoch] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["offsetTop"] = self.offset_top + json["pageScaleFactor"] = self.page_scale_factor + json["deviceWidth"] = self.device_width + json["deviceHeight"] = self.device_height + json["scrollOffsetX"] = self.scroll_offset_x + json["scrollOffsetY"] = self.scroll_offset_y + if self.timestamp is not None: + json["timestamp"] = self.timestamp.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScreencastFrameMetadata: + return cls( + offset_top=float(json["offsetTop"]), + page_scale_factor=float(json["pageScaleFactor"]), + device_width=float(json["deviceWidth"]), + device_height=float(json["deviceHeight"]), + scroll_offset_x=float(json["scrollOffsetX"]), + scroll_offset_y=float(json["scrollOffsetY"]), + timestamp=( + network.TimeSinceEpoch.from_json(json["timestamp"]) + if json.get("timestamp", None) is not None + else None + ), + )
+ + + +
+[docs] +class DialogType(enum.Enum): + """ + Javascript dialog type. + """ + + ALERT = "alert" + CONFIRM = "confirm" + PROMPT = "prompt" + BEFOREUNLOAD = "beforeunload" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> DialogType: + return cls(json)
+ + + +
+[docs] +@dataclass +class AppManifestError: + """ + Error while parsing app manifest. + """ + + #: Error message. + message: str + + #: If critical, this is a non-recoverable parse error. + critical: int + + #: Error line. + line: int + + #: Error column. + column: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["message"] = self.message + json["critical"] = self.critical + json["line"] = self.line + json["column"] = self.column + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AppManifestError: + return cls( + message=str(json["message"]), + critical=int(json["critical"]), + line=int(json["line"]), + column=int(json["column"]), + )
+ + + +
+[docs] +@dataclass +class AppManifestParsedProperties: + """ + Parsed app manifest properties. + """ + + #: Computed scope value + scope: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["scope"] = self.scope + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AppManifestParsedProperties: + return cls( + scope=str(json["scope"]), + )
+ + + +
+[docs] +@dataclass +class LayoutViewport: + """ + Layout viewport position and dimensions. + """ + + #: Horizontal offset relative to the document (CSS pixels). + page_x: int + + #: Vertical offset relative to the document (CSS pixels). + page_y: int + + #: Width (CSS pixels), excludes scrollbar if present. + client_width: int + + #: Height (CSS pixels), excludes scrollbar if present. + client_height: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["pageX"] = self.page_x + json["pageY"] = self.page_y + json["clientWidth"] = self.client_width + json["clientHeight"] = self.client_height + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LayoutViewport: + return cls( + page_x=int(json["pageX"]), + page_y=int(json["pageY"]), + client_width=int(json["clientWidth"]), + client_height=int(json["clientHeight"]), + )
+ + + +
+[docs] +@dataclass +class VisualViewport: + """ + Visual viewport position, dimensions, and scale. + """ + + #: Horizontal offset relative to the layout viewport (CSS pixels). + offset_x: float + + #: Vertical offset relative to the layout viewport (CSS pixels). + offset_y: float + + #: Horizontal offset relative to the document (CSS pixels). + page_x: float + + #: Vertical offset relative to the document (CSS pixels). + page_y: float + + #: Width (CSS pixels), excludes scrollbar if present. + client_width: float + + #: Height (CSS pixels), excludes scrollbar if present. + client_height: float + + #: Scale relative to the ideal viewport (size at width=device-width). + scale: float + + #: Page zoom factor (CSS to device independent pixels ratio). + zoom: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["offsetX"] = self.offset_x + json["offsetY"] = self.offset_y + json["pageX"] = self.page_x + json["pageY"] = self.page_y + json["clientWidth"] = self.client_width + json["clientHeight"] = self.client_height + json["scale"] = self.scale + if self.zoom is not None: + json["zoom"] = self.zoom + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> VisualViewport: + return cls( + offset_x=float(json["offsetX"]), + offset_y=float(json["offsetY"]), + page_x=float(json["pageX"]), + page_y=float(json["pageY"]), + client_width=float(json["clientWidth"]), + client_height=float(json["clientHeight"]), + scale=float(json["scale"]), + zoom=float(json["zoom"]) if json.get("zoom", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class Viewport: + """ + Viewport for capturing screenshot. + """ + + #: X offset in device independent pixels (dip). + x: float + + #: Y offset in device independent pixels (dip). + y: float + + #: Rectangle width in device independent pixels (dip). + width: float + + #: Rectangle height in device independent pixels (dip). + height: float + + #: Page scale factor. + scale: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["x"] = self.x + json["y"] = self.y + json["width"] = self.width + json["height"] = self.height + json["scale"] = self.scale + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Viewport: + return cls( + x=float(json["x"]), + y=float(json["y"]), + width=float(json["width"]), + height=float(json["height"]), + scale=float(json["scale"]), + )
+ + + +
+[docs] +@dataclass +class FontFamilies: + """ + Generic font families collection. + """ + + #: The standard font-family. + standard: typing.Optional[str] = None + + #: The fixed font-family. + fixed: typing.Optional[str] = None + + #: The serif font-family. + serif: typing.Optional[str] = None + + #: The sansSerif font-family. + sans_serif: typing.Optional[str] = None + + #: The cursive font-family. + cursive: typing.Optional[str] = None + + #: The fantasy font-family. + fantasy: typing.Optional[str] = None + + #: The math font-family. + math: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.standard is not None: + json["standard"] = self.standard + if self.fixed is not None: + json["fixed"] = self.fixed + if self.serif is not None: + json["serif"] = self.serif + if self.sans_serif is not None: + json["sansSerif"] = self.sans_serif + if self.cursive is not None: + json["cursive"] = self.cursive + if self.fantasy is not None: + json["fantasy"] = self.fantasy + if self.math is not None: + json["math"] = self.math + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FontFamilies: + return cls( + standard=( + str(json["standard"]) + if json.get("standard", None) is not None + else None + ), + fixed=str(json["fixed"]) if json.get("fixed", None) is not None else None, + serif=str(json["serif"]) if json.get("serif", None) is not None else None, + sans_serif=( + str(json["sansSerif"]) + if json.get("sansSerif", None) is not None + else None + ), + cursive=( + str(json["cursive"]) if json.get("cursive", None) is not None else None + ), + fantasy=( + str(json["fantasy"]) if json.get("fantasy", None) is not None else None + ), + math=str(json["math"]) if json.get("math", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class ScriptFontFamilies: + """ + Font families collection for a script. + """ + + #: Name of the script which these font families are defined for. + script: str + + #: Generic font families collection for the script. + font_families: FontFamilies + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["script"] = self.script + json["fontFamilies"] = self.font_families.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScriptFontFamilies: + return cls( + script=str(json["script"]), + font_families=FontFamilies.from_json(json["fontFamilies"]), + )
+ + + +
+[docs] +@dataclass +class FontSizes: + """ + Default font sizes. + """ + + #: Default standard font size. + standard: typing.Optional[int] = None + + #: Default fixed font size. + fixed: typing.Optional[int] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.standard is not None: + json["standard"] = self.standard + if self.fixed is not None: + json["fixed"] = self.fixed + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FontSizes: + return cls( + standard=( + int(json["standard"]) + if json.get("standard", None) is not None + else None + ), + fixed=int(json["fixed"]) if json.get("fixed", None) is not None else None, + )
+ + + +
+[docs] +class ClientNavigationReason(enum.Enum): + FORM_SUBMISSION_GET = "formSubmissionGet" + FORM_SUBMISSION_POST = "formSubmissionPost" + HTTP_HEADER_REFRESH = "httpHeaderRefresh" + SCRIPT_INITIATED = "scriptInitiated" + META_TAG_REFRESH = "metaTagRefresh" + PAGE_BLOCK_INTERSTITIAL = "pageBlockInterstitial" + RELOAD = "reload" + ANCHOR_CLICK = "anchorClick" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ClientNavigationReason: + return cls(json)
+ + + +
+[docs] +class ClientNavigationDisposition(enum.Enum): + CURRENT_TAB = "currentTab" + NEW_TAB = "newTab" + NEW_WINDOW = "newWindow" + DOWNLOAD = "download" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ClientNavigationDisposition: + return cls(json)
+ + + +
+[docs] +@dataclass +class InstallabilityErrorArgument: + #: Argument name (e.g. name:'minimum-icon-size-in-pixels'). + name: str + + #: Argument value (e.g. value:'64'). + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InstallabilityErrorArgument: + return cls( + name=str(json["name"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@dataclass +class InstallabilityError: + """ + The installability error + """ + + #: The error id (e.g. 'manifest-missing-suitable-icon'). + error_id: str + + #: The list of error arguments (e.g. {name:'minimum-icon-size-in-pixels', value:'64'}). + error_arguments: typing.List[InstallabilityErrorArgument] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["errorId"] = self.error_id + json["errorArguments"] = [i.to_json() for i in self.error_arguments] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InstallabilityError: + return cls( + error_id=str(json["errorId"]), + error_arguments=[ + InstallabilityErrorArgument.from_json(i) for i in json["errorArguments"] + ], + )
+ + + +
+[docs] +class ReferrerPolicy(enum.Enum): + """ + The referrer policy used for the navigation. + """ + + NO_REFERRER = "noReferrer" + NO_REFERRER_WHEN_DOWNGRADE = "noReferrerWhenDowngrade" + ORIGIN = "origin" + ORIGIN_WHEN_CROSS_ORIGIN = "originWhenCrossOrigin" + SAME_ORIGIN = "sameOrigin" + STRICT_ORIGIN = "strictOrigin" + STRICT_ORIGIN_WHEN_CROSS_ORIGIN = "strictOriginWhenCrossOrigin" + UNSAFE_URL = "unsafeUrl" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ReferrerPolicy: + return cls(json)
+ + + +
+[docs] +@dataclass +class CompilationCacheParams: + """ + Per-script compilation cache parameters for ``Page.produceCompilationCache`` + """ + + #: The URL of the script to produce a compilation cache entry for. + url: str + + #: A hint to the backend whether eager compilation is recommended. + #: (the actual compilation mode used is upon backend discretion). + eager: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + if self.eager is not None: + json["eager"] = self.eager + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CompilationCacheParams: + return cls( + url=str(json["url"]), + eager=bool(json["eager"]) if json.get("eager", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class FileFilter: + name: typing.Optional[str] = None + + accepts: typing.Optional[typing.List[str]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.name is not None: + json["name"] = self.name + if self.accepts is not None: + json["accepts"] = [i for i in self.accepts] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FileFilter: + return cls( + name=str(json["name"]) if json.get("name", None) is not None else None, + accepts=( + [str(i) for i in json["accepts"]] + if json.get("accepts", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class FileHandler: + action: str + + name: str + + #: Won't repeat the enums, using string for easy comparison. Same as the + #: other enums below. + launch_type: str + + icons: typing.Optional[typing.List[ImageResource]] = None + + #: Mimic a map, name is the key, accepts is the value. + accepts: typing.Optional[typing.List[FileFilter]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["action"] = self.action + json["name"] = self.name + json["launchType"] = self.launch_type + if self.icons is not None: + json["icons"] = [i.to_json() for i in self.icons] + if self.accepts is not None: + json["accepts"] = [i.to_json() for i in self.accepts] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FileHandler: + return cls( + action=str(json["action"]), + name=str(json["name"]), + launch_type=str(json["launchType"]), + icons=( + [ImageResource.from_json(i) for i in json["icons"]] + if json.get("icons", None) is not None + else None + ), + accepts=( + [FileFilter.from_json(i) for i in json["accepts"]] + if json.get("accepts", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ImageResource: + """ + The image definition used in both icon and screenshot. + """ + + #: The src field in the definition, but changing to url in favor of + #: consistency. + url: str + + sizes: typing.Optional[str] = None + + type_: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + if self.sizes is not None: + json["sizes"] = self.sizes + if self.type_ is not None: + json["type"] = self.type_ + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ImageResource: + return cls( + url=str(json["url"]), + sizes=str(json["sizes"]) if json.get("sizes", None) is not None else None, + type_=str(json["type"]) if json.get("type", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class LaunchHandler: + client_mode: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["clientMode"] = self.client_mode + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LaunchHandler: + return cls( + client_mode=str(json["clientMode"]), + )
+ + + +
+[docs] +@dataclass +class ProtocolHandler: + protocol: str + + url: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["protocol"] = self.protocol + json["url"] = self.url + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ProtocolHandler: + return cls( + protocol=str(json["protocol"]), + url=str(json["url"]), + )
+ + + +
+[docs] +@dataclass +class RelatedApplication: + url: str + + id_: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + if self.id_ is not None: + json["id"] = self.id_ + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RelatedApplication: + return cls( + url=str(json["url"]), + id_=str(json["id"]) if json.get("id", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class ScopeExtension: + #: Instead of using tuple, this field always returns the serialized string + #: for easy understanding and comparison. + origin: str + + has_origin_wildcard: bool + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["origin"] = self.origin + json["hasOriginWildcard"] = self.has_origin_wildcard + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScopeExtension: + return cls( + origin=str(json["origin"]), + has_origin_wildcard=bool(json["hasOriginWildcard"]), + )
+ + + +
+[docs] +@dataclass +class Screenshot: + image: ImageResource + + form_factor: str + + label: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["image"] = self.image.to_json() + json["formFactor"] = self.form_factor + if self.label is not None: + json["label"] = self.label + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Screenshot: + return cls( + image=ImageResource.from_json(json["image"]), + form_factor=str(json["formFactor"]), + label=str(json["label"]) if json.get("label", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class ShareTarget: + action: str + + method: str + + enctype: str + + #: Embed the ShareTargetParams + title: typing.Optional[str] = None + + text: typing.Optional[str] = None + + url: typing.Optional[str] = None + + files: typing.Optional[typing.List[FileFilter]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["action"] = self.action + json["method"] = self.method + json["enctype"] = self.enctype + if self.title is not None: + json["title"] = self.title + if self.text is not None: + json["text"] = self.text + if self.url is not None: + json["url"] = self.url + if self.files is not None: + json["files"] = [i.to_json() for i in self.files] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ShareTarget: + return cls( + action=str(json["action"]), + method=str(json["method"]), + enctype=str(json["enctype"]), + title=str(json["title"]) if json.get("title", None) is not None else None, + text=str(json["text"]) if json.get("text", None) is not None else None, + url=str(json["url"]) if json.get("url", None) is not None else None, + files=( + [FileFilter.from_json(i) for i in json["files"]] + if json.get("files", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class Shortcut: + name: str + + url: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["url"] = self.url + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Shortcut: + return cls( + name=str(json["name"]), + url=str(json["url"]), + )
+ + + +
+[docs] +@dataclass +class WebAppManifest: + background_color: typing.Optional[str] = None + + #: The extra description provided by the manifest. + description: typing.Optional[str] = None + + dir_: typing.Optional[str] = None + + display: typing.Optional[str] = None + + #: The overrided display mode controlled by the user. + display_overrides: typing.Optional[typing.List[str]] = None + + #: The handlers to open files. + file_handlers: typing.Optional[typing.List[FileHandler]] = None + + icons: typing.Optional[typing.List[ImageResource]] = None + + id_: typing.Optional[str] = None + + lang: typing.Optional[str] = None + + #: TODO(crbug.com/1231886): This field is non-standard and part of a Chrome + #: experiment. See: + #: https://github.com/WICG/web-app-launch/blob/main/launch_handler.md + launch_handler: typing.Optional[LaunchHandler] = None + + name: typing.Optional[str] = None + + orientation: typing.Optional[str] = None + + prefer_related_applications: typing.Optional[bool] = None + + #: The handlers to open protocols. + protocol_handlers: typing.Optional[typing.List[ProtocolHandler]] = None + + related_applications: typing.Optional[typing.List[RelatedApplication]] = None + + scope: typing.Optional[str] = None + + #: Non-standard, see + #: https://github.com/WICG/manifest-incubations/blob/gh-pages/scope_extensions-explainer.md + scope_extensions: typing.Optional[typing.List[ScopeExtension]] = None + + #: The screenshots used by chromium. + screenshots: typing.Optional[typing.List[Screenshot]] = None + + share_target: typing.Optional[ShareTarget] = None + + short_name: typing.Optional[str] = None + + shortcuts: typing.Optional[typing.List[Shortcut]] = None + + start_url: typing.Optional[str] = None + + theme_color: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.background_color is not None: + json["backgroundColor"] = self.background_color + if self.description is not None: + json["description"] = self.description + if self.dir_ is not None: + json["dir"] = self.dir_ + if self.display is not None: + json["display"] = self.display + if self.display_overrides is not None: + json["displayOverrides"] = [i for i in self.display_overrides] + if self.file_handlers is not None: + json["fileHandlers"] = [i.to_json() for i in self.file_handlers] + if self.icons is not None: + json["icons"] = [i.to_json() for i in self.icons] + if self.id_ is not None: + json["id"] = self.id_ + if self.lang is not None: + json["lang"] = self.lang + if self.launch_handler is not None: + json["launchHandler"] = self.launch_handler.to_json() + if self.name is not None: + json["name"] = self.name + if self.orientation is not None: + json["orientation"] = self.orientation + if self.prefer_related_applications is not None: + json["preferRelatedApplications"] = self.prefer_related_applications + if self.protocol_handlers is not None: + json["protocolHandlers"] = [i.to_json() for i in self.protocol_handlers] + if self.related_applications is not None: + json["relatedApplications"] = [ + i.to_json() for i in self.related_applications + ] + if self.scope is not None: + json["scope"] = self.scope + if self.scope_extensions is not None: + json["scopeExtensions"] = [i.to_json() for i in self.scope_extensions] + if self.screenshots is not None: + json["screenshots"] = [i.to_json() for i in self.screenshots] + if self.share_target is not None: + json["shareTarget"] = self.share_target.to_json() + if self.short_name is not None: + json["shortName"] = self.short_name + if 
self.shortcuts is not None: + json["shortcuts"] = [i.to_json() for i in self.shortcuts] + if self.start_url is not None: + json["startUrl"] = self.start_url + if self.theme_color is not None: + json["themeColor"] = self.theme_color + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WebAppManifest: + return cls( + background_color=( + str(json["backgroundColor"]) + if json.get("backgroundColor", None) is not None + else None + ), + description=( + str(json["description"]) + if json.get("description", None) is not None + else None + ), + dir_=str(json["dir"]) if json.get("dir", None) is not None else None, + display=( + str(json["display"]) if json.get("display", None) is not None else None + ), + display_overrides=( + [str(i) for i in json["displayOverrides"]] + if json.get("displayOverrides", None) is not None + else None + ), + file_handlers=( + [FileHandler.from_json(i) for i in json["fileHandlers"]] + if json.get("fileHandlers", None) is not None + else None + ), + icons=( + [ImageResource.from_json(i) for i in json["icons"]] + if json.get("icons", None) is not None + else None + ), + id_=str(json["id"]) if json.get("id", None) is not None else None, + lang=str(json["lang"]) if json.get("lang", None) is not None else None, + launch_handler=( + LaunchHandler.from_json(json["launchHandler"]) + if json.get("launchHandler", None) is not None + else None + ), + name=str(json["name"]) if json.get("name", None) is not None else None, + orientation=( + str(json["orientation"]) + if json.get("orientation", None) is not None + else None + ), + prefer_related_applications=( + bool(json["preferRelatedApplications"]) + if json.get("preferRelatedApplications", None) is not None + else None + ), + protocol_handlers=( + [ProtocolHandler.from_json(i) for i in json["protocolHandlers"]] + if json.get("protocolHandlers", None) is not None + else None + ), + related_applications=( + [RelatedApplication.from_json(i) for i in json["relatedApplications"]] + if json.get("relatedApplications", None) is not None + else None + ), + scope=str(json["scope"]) if json.get("scope", None) is not None else None, + scope_extensions=( + [ScopeExtension.from_json(i) for i in json["scopeExtensions"]] + if json.get("scopeExtensions", None) is not None + else None + ), + screenshots=( + [Screenshot.from_json(i) for i in json["screenshots"]] + if json.get("screenshots", None) is not None + else None + ), + share_target=( + ShareTarget.from_json(json["shareTarget"]) + if json.get("shareTarget", None) is not None + else None + ), + short_name=( + str(json["shortName"]) + if json.get("shortName", None) is not None + else None + ), + shortcuts=( + [Shortcut.from_json(i) for i in json["shortcuts"]] + if json.get("shortcuts", None) is not None + else None + ), + start_url=( + str(json["startUrl"]) + if json.get("startUrl", None) is not None + else None + ), + theme_color=( + str(json["themeColor"]) + if json.get("themeColor", None) is not None + else None + ), + )
+ + + +
+[docs] +class AutoResponseMode(enum.Enum): + """ + Enum of possible auto-response for permission / prompt dialogs. + """ + + NONE = "none" + AUTO_ACCEPT = "autoAccept" + AUTO_REJECT = "autoReject" + AUTO_OPT_OUT = "autoOptOut" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AutoResponseMode: + return cls(json)
+ + + + + + + +
+[docs] +class BackForwardCacheNotRestoredReason(enum.Enum): + """ + List of not restored reasons for back-forward cache. + """ + + NOT_PRIMARY_MAIN_FRAME = "NotPrimaryMainFrame" + BACK_FORWARD_CACHE_DISABLED = "BackForwardCacheDisabled" + RELATED_ACTIVE_CONTENTS_EXIST = "RelatedActiveContentsExist" + HTTP_STATUS_NOT_OK = "HTTPStatusNotOK" + SCHEME_NOT_HTTP_OR_HTTPS = "SchemeNotHTTPOrHTTPS" + LOADING = "Loading" + WAS_GRANTED_MEDIA_ACCESS = "WasGrantedMediaAccess" + DISABLE_FOR_RENDER_FRAME_HOST_CALLED = "DisableForRenderFrameHostCalled" + DOMAIN_NOT_ALLOWED = "DomainNotAllowed" + HTTP_METHOD_NOT_GET = "HTTPMethodNotGET" + SUBFRAME_IS_NAVIGATING = "SubframeIsNavigating" + TIMEOUT = "Timeout" + CACHE_LIMIT = "CacheLimit" + JAVA_SCRIPT_EXECUTION = "JavaScriptExecution" + RENDERER_PROCESS_KILLED = "RendererProcessKilled" + RENDERER_PROCESS_CRASHED = "RendererProcessCrashed" + SCHEDULER_TRACKED_FEATURE_USED = "SchedulerTrackedFeatureUsed" + CONFLICTING_BROWSING_INSTANCE = "ConflictingBrowsingInstance" + CACHE_FLUSHED = "CacheFlushed" + SERVICE_WORKER_VERSION_ACTIVATION = "ServiceWorkerVersionActivation" + SESSION_RESTORED = "SessionRestored" + SERVICE_WORKER_POST_MESSAGE = "ServiceWorkerPostMessage" + ENTERED_BACK_FORWARD_CACHE_BEFORE_SERVICE_WORKER_HOST_ADDED = ( + "EnteredBackForwardCacheBeforeServiceWorkerHostAdded" + ) + RENDER_FRAME_HOST_REUSED_SAME_SITE = "RenderFrameHostReused_SameSite" + RENDER_FRAME_HOST_REUSED_CROSS_SITE = "RenderFrameHostReused_CrossSite" + SERVICE_WORKER_CLAIM = "ServiceWorkerClaim" + IGNORE_EVENT_AND_EVICT = "IgnoreEventAndEvict" + HAVE_INNER_CONTENTS = "HaveInnerContents" + TIMEOUT_PUTTING_IN_CACHE = "TimeoutPuttingInCache" + BACK_FORWARD_CACHE_DISABLED_BY_LOW_MEMORY = "BackForwardCacheDisabledByLowMemory" + BACK_FORWARD_CACHE_DISABLED_BY_COMMAND_LINE = ( + "BackForwardCacheDisabledByCommandLine" + ) + NETWORK_REQUEST_DATAPIPE_DRAINED_AS_BYTES_CONSUMER = ( + "NetworkRequestDatapipeDrainedAsBytesConsumer" + ) + NETWORK_REQUEST_REDIRECTED = "NetworkRequestRedirected" + NETWORK_REQUEST_TIMEOUT = "NetworkRequestTimeout" + NETWORK_EXCEEDS_BUFFER_LIMIT = "NetworkExceedsBufferLimit" + NAVIGATION_CANCELLED_WHILE_RESTORING = "NavigationCancelledWhileRestoring" + NOT_MOST_RECENT_NAVIGATION_ENTRY = "NotMostRecentNavigationEntry" + BACK_FORWARD_CACHE_DISABLED_FOR_PRERENDER = "BackForwardCacheDisabledForPrerender" + USER_AGENT_OVERRIDE_DIFFERS = "UserAgentOverrideDiffers" + FOREGROUND_CACHE_LIMIT = "ForegroundCacheLimit" + BROWSING_INSTANCE_NOT_SWAPPED = "BrowsingInstanceNotSwapped" + BACK_FORWARD_CACHE_DISABLED_FOR_DELEGATE = "BackForwardCacheDisabledForDelegate" + UNLOAD_HANDLER_EXISTS_IN_MAIN_FRAME = "UnloadHandlerExistsInMainFrame" + UNLOAD_HANDLER_EXISTS_IN_SUB_FRAME = "UnloadHandlerExistsInSubFrame" + SERVICE_WORKER_UNREGISTRATION = "ServiceWorkerUnregistration" + CACHE_CONTROL_NO_STORE = "CacheControlNoStore" + CACHE_CONTROL_NO_STORE_COOKIE_MODIFIED = "CacheControlNoStoreCookieModified" + CACHE_CONTROL_NO_STORE_HTTP_ONLY_COOKIE_MODIFIED = ( + "CacheControlNoStoreHTTPOnlyCookieModified" + ) + NO_RESPONSE_HEAD = "NoResponseHead" + UNKNOWN = "Unknown" + ACTIVATION_NAVIGATIONS_DISALLOWED_FOR_BUG1234857 = ( + "ActivationNavigationsDisallowedForBug1234857" + ) + ERROR_DOCUMENT = "ErrorDocument" + FENCED_FRAMES_EMBEDDER = "FencedFramesEmbedder" + COOKIE_DISABLED = "CookieDisabled" + HTTP_AUTH_REQUIRED = "HTTPAuthRequired" + COOKIE_FLUSHED = "CookieFlushed" + BROADCAST_CHANNEL_ON_MESSAGE = "BroadcastChannelOnMessage" + WEB_VIEW_SETTINGS_CHANGED = "WebViewSettingsChanged" + 
WEB_VIEW_JAVA_SCRIPT_OBJECT_CHANGED = "WebViewJavaScriptObjectChanged" + WEB_VIEW_MESSAGE_LISTENER_INJECTED = "WebViewMessageListenerInjected" + WEB_VIEW_SAFE_BROWSING_ALLOWLIST_CHANGED = "WebViewSafeBrowsingAllowlistChanged" + WEB_VIEW_DOCUMENT_START_JAVASCRIPT_CHANGED = "WebViewDocumentStartJavascriptChanged" + WEB_SOCKET = "WebSocket" + WEB_TRANSPORT = "WebTransport" + WEB_RTC = "WebRTC" + MAIN_RESOURCE_HAS_CACHE_CONTROL_NO_STORE = "MainResourceHasCacheControlNoStore" + MAIN_RESOURCE_HAS_CACHE_CONTROL_NO_CACHE = "MainResourceHasCacheControlNoCache" + SUBRESOURCE_HAS_CACHE_CONTROL_NO_STORE = "SubresourceHasCacheControlNoStore" + SUBRESOURCE_HAS_CACHE_CONTROL_NO_CACHE = "SubresourceHasCacheControlNoCache" + CONTAINS_PLUGINS = "ContainsPlugins" + DOCUMENT_LOADED = "DocumentLoaded" + OUTSTANDING_NETWORK_REQUEST_OTHERS = "OutstandingNetworkRequestOthers" + REQUESTED_MIDI_PERMISSION = "RequestedMIDIPermission" + REQUESTED_AUDIO_CAPTURE_PERMISSION = "RequestedAudioCapturePermission" + REQUESTED_VIDEO_CAPTURE_PERMISSION = "RequestedVideoCapturePermission" + REQUESTED_BACK_FORWARD_CACHE_BLOCKED_SENSORS = ( + "RequestedBackForwardCacheBlockedSensors" + ) + REQUESTED_BACKGROUND_WORK_PERMISSION = "RequestedBackgroundWorkPermission" + BROADCAST_CHANNEL = "BroadcastChannel" + WEB_XR = "WebXR" + SHARED_WORKER = "SharedWorker" + WEB_LOCKS = "WebLocks" + WEB_HID = "WebHID" + WEB_SHARE = "WebShare" + REQUESTED_STORAGE_ACCESS_GRANT = "RequestedStorageAccessGrant" + WEB_NFC = "WebNfc" + OUTSTANDING_NETWORK_REQUEST_FETCH = "OutstandingNetworkRequestFetch" + OUTSTANDING_NETWORK_REQUEST_XHR = "OutstandingNetworkRequestXHR" + APP_BANNER = "AppBanner" + PRINTING = "Printing" + WEB_DATABASE = "WebDatabase" + PICTURE_IN_PICTURE = "PictureInPicture" + PORTAL = "Portal" + SPEECH_RECOGNIZER = "SpeechRecognizer" + IDLE_MANAGER = "IdleManager" + PAYMENT_MANAGER = "PaymentManager" + SPEECH_SYNTHESIS = "SpeechSynthesis" + KEYBOARD_LOCK = "KeyboardLock" + WEB_OTP_SERVICE = "WebOTPService" + OUTSTANDING_NETWORK_REQUEST_DIRECT_SOCKET = "OutstandingNetworkRequestDirectSocket" + INJECTED_JAVASCRIPT = "InjectedJavascript" + INJECTED_STYLE_SHEET = "InjectedStyleSheet" + KEEPALIVE_REQUEST = "KeepaliveRequest" + INDEXED_DB_EVENT = "IndexedDBEvent" + DUMMY = "Dummy" + JS_NETWORK_REQUEST_RECEIVED_CACHE_CONTROL_NO_STORE_RESOURCE = ( + "JsNetworkRequestReceivedCacheControlNoStoreResource" + ) + WEB_RTC_STICKY = "WebRTCSticky" + WEB_TRANSPORT_STICKY = "WebTransportSticky" + WEB_SOCKET_STICKY = "WebSocketSticky" + SMART_CARD = "SmartCard" + LIVE_MEDIA_STREAM_TRACK = "LiveMediaStreamTrack" + UNLOAD_HANDLER = "UnloadHandler" + PARSER_ABORTED = "ParserAborted" + CONTENT_SECURITY_HANDLER = "ContentSecurityHandler" + CONTENT_WEB_AUTHENTICATION_API = "ContentWebAuthenticationAPI" + CONTENT_FILE_CHOOSER = "ContentFileChooser" + CONTENT_SERIAL = "ContentSerial" + CONTENT_FILE_SYSTEM_ACCESS = "ContentFileSystemAccess" + CONTENT_MEDIA_DEVICES_DISPATCHER_HOST = "ContentMediaDevicesDispatcherHost" + CONTENT_WEB_BLUETOOTH = "ContentWebBluetooth" + CONTENT_WEB_USB = "ContentWebUSB" + CONTENT_MEDIA_SESSION_SERVICE = "ContentMediaSessionService" + CONTENT_SCREEN_READER = "ContentScreenReader" + EMBEDDER_POPUP_BLOCKER_TAB_HELPER = "EmbedderPopupBlockerTabHelper" + EMBEDDER_SAFE_BROWSING_TRIGGERED_POPUP_BLOCKER = ( + "EmbedderSafeBrowsingTriggeredPopupBlocker" + ) + EMBEDDER_SAFE_BROWSING_THREAT_DETAILS = "EmbedderSafeBrowsingThreatDetails" + EMBEDDER_APP_BANNER_MANAGER = "EmbedderAppBannerManager" + EMBEDDER_DOM_DISTILLER_VIEWER_SOURCE = 
"EmbedderDomDistillerViewerSource" + EMBEDDER_DOM_DISTILLER_SELF_DELETING_REQUEST_DELEGATE = ( + "EmbedderDomDistillerSelfDeletingRequestDelegate" + ) + EMBEDDER_OOM_INTERVENTION_TAB_HELPER = "EmbedderOomInterventionTabHelper" + EMBEDDER_OFFLINE_PAGE = "EmbedderOfflinePage" + EMBEDDER_CHROME_PASSWORD_MANAGER_CLIENT_BIND_CREDENTIAL_MANAGER = ( + "EmbedderChromePasswordManagerClientBindCredentialManager" + ) + EMBEDDER_PERMISSION_REQUEST_MANAGER = "EmbedderPermissionRequestManager" + EMBEDDER_MODAL_DIALOG = "EmbedderModalDialog" + EMBEDDER_EXTENSIONS = "EmbedderExtensions" + EMBEDDER_EXTENSION_MESSAGING = "EmbedderExtensionMessaging" + EMBEDDER_EXTENSION_MESSAGING_FOR_OPEN_PORT = "EmbedderExtensionMessagingForOpenPort" + EMBEDDER_EXTENSION_SENT_MESSAGE_TO_CACHED_FRAME = ( + "EmbedderExtensionSentMessageToCachedFrame" + ) + REQUESTED_BY_WEB_VIEW_CLIENT = "RequestedByWebViewClient" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> BackForwardCacheNotRestoredReason: + return cls(json)
+ + + +
+[docs] +class BackForwardCacheNotRestoredReasonType(enum.Enum): + """ + Types of not restored reasons for back-forward cache. + """ + + SUPPORT_PENDING = "SupportPending" + PAGE_SUPPORT_NEEDED = "PageSupportNeeded" + CIRCUMSTANTIAL = "Circumstantial" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> BackForwardCacheNotRestoredReasonType: + return cls(json)
+ + + +
+[docs] +@dataclass +class BackForwardCacheBlockingDetails: + #: Line number in the script (0-based). + line_number: int + + #: Column number in the script (0-based). + column_number: int + + #: Url of the file where blockage happened. Optional because of tests. + url: typing.Optional[str] = None + + #: Function name where blockage happened. Optional because of anonymous functions and tests. + function: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["lineNumber"] = self.line_number + json["columnNumber"] = self.column_number + if self.url is not None: + json["url"] = self.url + if self.function is not None: + json["function"] = self.function + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BackForwardCacheBlockingDetails: + return cls( + line_number=int(json["lineNumber"]), + column_number=int(json["columnNumber"]), + url=str(json["url"]) if json.get("url", None) is not None else None, + function=( + str(json["function"]) + if json.get("function", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class BackForwardCacheNotRestoredExplanation: + #: Type of the reason + type_: BackForwardCacheNotRestoredReasonType + + #: Not restored reason + reason: BackForwardCacheNotRestoredReason + + #: Context associated with the reason. The meaning of this context is + #: dependent on the reason: + #: - EmbedderExtensionSentMessageToCachedFrame: the extension ID. + context: typing.Optional[str] = None + + details: typing.Optional[typing.List[BackForwardCacheBlockingDetails]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_.to_json() + json["reason"] = self.reason.to_json() + if self.context is not None: + json["context"] = self.context + if self.details is not None: + json["details"] = [i.to_json() for i in self.details] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BackForwardCacheNotRestoredExplanation: + return cls( + type_=BackForwardCacheNotRestoredReasonType.from_json(json["type"]), + reason=BackForwardCacheNotRestoredReason.from_json(json["reason"]), + context=( + str(json["context"]) if json.get("context", None) is not None else None + ), + details=( + [BackForwardCacheBlockingDetails.from_json(i) for i in json["details"]] + if json.get("details", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class BackForwardCacheNotRestoredExplanationTree: + #: URL of each frame + url: str + + #: Not restored reasons of each frame + explanations: typing.List[BackForwardCacheNotRestoredExplanation] + + #: Array of children frame + children: typing.List[BackForwardCacheNotRestoredExplanationTree] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["explanations"] = [i.to_json() for i in self.explanations] + json["children"] = [i.to_json() for i in self.children] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BackForwardCacheNotRestoredExplanationTree: + return cls( + url=str(json["url"]), + explanations=[ + BackForwardCacheNotRestoredExplanation.from_json(i) + for i in json["explanations"] + ], + children=[ + BackForwardCacheNotRestoredExplanationTree.from_json(i) + for i in json["children"] + ], + )
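``BackForwardCacheNotRestoredExplanationTree`` mirrors the frame hierarchy, so collecting the blocking reasons per frame is again a recursive walk. An illustrative helper (not part of the module; frames sharing a URL overwrite each other in this simple mapping):

def collect_bfcache_reasons(
    tree: BackForwardCacheNotRestoredExplanationTree,
    out: typing.Optional[typing.Dict[str, typing.List[str]]] = None,
) -> typing.Dict[str, typing.List[str]]:
    # Map frame URL -> names of the reasons that kept it out of the cache.
    out = {} if out is None else out
    out[tree.url] = [e.reason.value for e in tree.explanations]
    for child in tree.children:
        collect_bfcache_reasons(child, out)
    return out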
+ + + +
+[docs] +@deprecated(version="1.3") +def add_script_to_evaluate_on_load( + script_source: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, ScriptIdentifier]: + """ + Deprecated, please use addScriptToEvaluateOnNewDocument instead. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param script_source: + :returns: Identifier of the added script. + """ + params: T_JSON_DICT = dict() + params["scriptSource"] = script_source + cmd_dict: T_JSON_DICT = { + "method": "Page.addScriptToEvaluateOnLoad", + "params": params, + } + json = yield cmd_dict + return ScriptIdentifier.from_json(json["identifier"])
+ + + +
+[docs] +def add_script_to_evaluate_on_new_document( + source: str, + world_name: typing.Optional[str] = None, + include_command_line_api: typing.Optional[bool] = None, + run_immediately: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, ScriptIdentifier]: + """ + Evaluates given script in every frame upon creation (before loading frame's scripts). + + :param source: + :param world_name: **(EXPERIMENTAL)** *(Optional)* If specified, creates an isolated world with the given name and evaluates given script in it. This world name will be used as the ExecutionContextDescription::name when the corresponding event is emitted. + :param include_command_line_api: **(EXPERIMENTAL)** *(Optional)* Specifies whether command line API should be available to the script, defaults to false. + :param run_immediately: **(EXPERIMENTAL)** *(Optional)* If true, runs the script immediately on existing execution contexts or worlds. Default: false. + :returns: Identifier of the added script. + """ + params: T_JSON_DICT = dict() + params["source"] = source + if world_name is not None: + params["worldName"] = world_name + if include_command_line_api is not None: + params["includeCommandLineAPI"] = include_command_line_api + if run_immediately is not None: + params["runImmediately"] = run_immediately + cmd_dict: T_JSON_DICT = { + "method": "Page.addScriptToEvaluateOnNewDocument", + "params": params, + } + json = yield cmd_dict + return ScriptIdentifier.from_json(json["identifier"])
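The command wrappers in the rest of this module only build the request and parse the reply; a client such as nodriver's ``Connection.send`` drives them over the websocket. A sketch that installs an init script and later removes it again, assuming ``tab`` is a connected nodriver Tab (or any object exposing the awaitable ``send()`` used elsewhere in this codebase):

async def install_probe(tab) -> ScriptIdentifier:
    # tab: assumed connected nodriver Tab / Connection with an awaitable send().
    ident = await tab.send(
        add_script_to_evaluate_on_new_document(
            source="window.__nd_probe = Date.now();",  # hypothetical payload
            run_immediately=True,
        )
    )
    return ident

async def remove_probe(tab, ident: ScriptIdentifier) -> None:
    await tab.send(remove_script_to_evaluate_on_new_document(ident))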
+ + + +
+[docs] +def bring_to_front() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Brings page to front (activates tab). + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.bringToFront", + } + json = yield cmd_dict
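Driving one of these wrappers by hand makes the contract visible: the generator yields the raw CDP request dict and delivers the parsed result through ``StopIteration``. In practice a client (e.g. nodriver's ``Connection.send``) does this for you; the sketch below feeds in a hypothetical empty result:

gen = bring_to_front()
request = next(gen)            # {'method': 'Page.bringToFront'}
try:
    gen.send({})               # hand the (empty) CDP result back to the wrapper
except StopIteration as exc:
    assert exc.value is None   # this command returns nothing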
+ + + +
+[docs] +def capture_screenshot( + format_: typing.Optional[str] = None, + quality: typing.Optional[int] = None, + clip: typing.Optional[Viewport] = None, + from_surface: typing.Optional[bool] = None, + capture_beyond_viewport: typing.Optional[bool] = None, + optimize_for_speed: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Capture page screenshot. + + :param format_: *(Optional)* Image compression format (defaults to png). + :param quality: *(Optional)* Compression quality from range [0..100] (jpeg only). + :param clip: *(Optional)* Capture the screenshot of a given region only. + :param from_surface: **(EXPERIMENTAL)** *(Optional)* Capture the screenshot from the surface, rather than the view. Defaults to true. + :param capture_beyond_viewport: **(EXPERIMENTAL)** *(Optional)* Capture the screenshot beyond the viewport. Defaults to false. + :param optimize_for_speed: **(EXPERIMENTAL)** *(Optional)* Optimize image encoding for speed, not for resulting size (defaults to false) + :returns: Base64-encoded image data. (Encoded as a base64 string when passed over JSON) + """ + params: T_JSON_DICT = dict() + if format_ is not None: + params["format"] = format_ + if quality is not None: + params["quality"] = quality + if clip is not None: + params["clip"] = clip.to_json() + if from_surface is not None: + params["fromSurface"] = from_surface + if capture_beyond_viewport is not None: + params["captureBeyondViewport"] = capture_beyond_viewport + if optimize_for_speed is not None: + params["optimizeForSpeed"] = optimize_for_speed + cmd_dict: T_JSON_DICT = { + "method": "Page.captureScreenshot", + "params": params, + } + json = yield cmd_dict + return str(json["data"])
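The returned screenshot is a base64 string, so it has to be decoded before writing to disk. A sketch capturing a JPEG of an 800x600 region, assuming a connected ``tab`` exposing the awaitable ``send()``:

import base64

async def save_region_screenshot(tab, path: str = "shot.jpg") -> None:
    # tab: assumed connected nodriver Tab / Connection with an awaitable send().
    data = await tab.send(
        capture_screenshot(
            format_="jpeg",
            quality=80,
            clip=Viewport(x=0, y=0, width=800, height=600, scale=1),
        )
    )
    with open(path, "wb") as fh:
        fh.write(base64.b64decode(data))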
+ + + +
+[docs] +def capture_snapshot( + format_: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Returns a snapshot of the page as a string. For MHTML format, the serialization includes + iframes, shadow DOM, external resources, and element-inline styles. + + **EXPERIMENTAL** + + :param format_: *(Optional)* Format (defaults to mhtml). + :returns: Serialized page data. + """ + params: T_JSON_DICT = dict() + if format_ is not None: + params["format"] = format_ + cmd_dict: T_JSON_DICT = { + "method": "Page.captureSnapshot", + "params": params, + } + json = yield cmd_dict + return str(json["data"])
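A companion sketch for ``capture_snapshot``, which returns the serialized page (MHTML by default) as text rather than base64; same ``tab`` assumption as above:

async def save_mhtml(tab, path: str = "page.mhtml") -> None:
    # tab: assumed connected nodriver Tab / Connection with an awaitable send().
    mhtml = await tab.send(capture_snapshot())
    with open(path, "w", encoding="utf-8") as fh:
        fh.write(mhtml)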
+ + + +
+[docs] +@deprecated(version="1.3") +def clear_device_metrics_override() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears the overridden device metrics. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.clearDeviceMetricsOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def clear_device_orientation_override() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, None] +): + """ + Clears the overridden Device Orientation. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.clearDeviceOrientationOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def clear_geolocation_override() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears the overridden Geolocation Position and Error. + + .. deprecated:: 1.3 + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.clearGeolocationOverride", + } + json = yield cmd_dict
+ + + +
+[docs] +def create_isolated_world( + frame_id: FrameId, + world_name: typing.Optional[str] = None, + grant_univeral_access: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, runtime.ExecutionContextId]: + """ + Creates an isolated world for the given frame. + + :param frame_id: Id of the frame in which the isolated world should be created. + :param world_name: *(Optional)* An optional name which is reported in the Execution Context. + :param grant_univeral_access: *(Optional)* Whether or not universal access should be granted to the isolated world. This is a powerful option, use with caution. + :returns: Execution context of the isolated world. + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + if world_name is not None: + params["worldName"] = world_name + if grant_univeral_access is not None: + params["grantUniveralAccess"] = grant_univeral_access + cmd_dict: T_JSON_DICT = { + "method": "Page.createIsolatedWorld", + "params": params, + } + json = yield cmd_dict + return runtime.ExecutionContextId.from_json(json["executionContextId"])
+ + + + + + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables page domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables page domain notifications. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_app_manifest( + manifest_id: typing.Optional[str] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + str, + typing.List[AppManifestError], + typing.Optional[str], + typing.Optional[AppManifestParsedProperties], + WebAppManifest, + ], +]: + """ + Gets the processed manifest for this current document. + This API always waits for the manifest to be loaded. + If manifestId is provided, and it does not match the manifest of the + current document, this API errors out. + If there is not a loaded page, this API errors out immediately. + + :param manifest_id: *(Optional)* + :returns: A tuple with the following items: + + 0. **url** - Manifest location. + 1. **errors** - + 2. **data** - *(Optional)* Manifest content. + 3. **parsed** - *(Optional)* Parsed manifest properties. Deprecated, use manifest instead. + 4. **manifest** - + """ + params: T_JSON_DICT = dict() + if manifest_id is not None: + params["manifestId"] = manifest_id + cmd_dict: T_JSON_DICT = { + "method": "Page.getAppManifest", + "params": params, + } + json = yield cmd_dict + return ( + str(json["url"]), + [AppManifestError.from_json(i) for i in json["errors"]], + str(json["data"]) if json.get("data", None) is not None else None, + ( + AppManifestParsedProperties.from_json(json["parsed"]) + if json.get("parsed", None) is not None + else None + ), + WebAppManifest.from_json(json["manifest"]), + )
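Because the return value is a tuple, unpacking it positionally is the natural way to consume it. A sketch that dumps manifest errors and a couple of parsed fields, assuming a connected ``tab`` exposing the awaitable ``send()``:

async def dump_manifest(tab) -> None:
    # tab: assumed connected nodriver Tab / Connection with an awaitable send().
    url, errors, data, parsed, manifest = await tab.send(get_app_manifest())
    for err in errors:
        print(f"{url}:{err.line}:{err.column}: {err.message}")
    print("name:", manifest.name, "start_url:", manifest.start_url)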
+ + + +
+[docs] +def get_installability_errors() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[InstallabilityError]] +): + """ + + + **EXPERIMENTAL** + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.getInstallabilityErrors", + } + json = yield cmd_dict + return [InstallabilityError.from_json(i) for i in json["installabilityErrors"]]
+ + + +
+[docs] +@deprecated(version="1.3") +def get_manifest_icons() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Optional[str]] +): + """ + Deprecated because it's not guaranteed that the returned icon is in fact the one used for PWA installation. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.getManifestIcons", + } + json = yield cmd_dict + return ( + str(json["primaryIcon"]) if json.get("primaryIcon", None) is not None else None + )
+ + + +
+[docs] +def get_app_id() -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[typing.Optional[str], typing.Optional[str]], +]: + """ + Returns the unique (PWA) app id. + Only returns values if the feature flag 'WebAppEnableManifestId' is enabled + + **EXPERIMENTAL** + + :returns: A tuple with the following items: + + 0. **appId** - *(Optional)* App id, either from manifest's id attribute or computed from start_url + 1. **recommendedId** - *(Optional)* Recommendation for manifest's id attribute to match current id computed from start_url + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.getAppId", + } + json = yield cmd_dict + return ( + str(json["appId"]) if json.get("appId", None) is not None else None, + ( + str(json["recommendedId"]) + if json.get("recommendedId", None) is not None + else None + ), + )
+ + + +
+[docs] +def get_ad_script_id( + frame_id: FrameId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Optional[AdScriptId]]: + """ + + + **EXPERIMENTAL** + + :param frame_id: + :returns: *(Optional)* Identifies the bottom-most script which caused the frame to be labelled as an ad. Only sent if frame is labelled as an ad and id is available. + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.getAdScriptId", + "params": params, + } + json = yield cmd_dict + return ( + AdScriptId.from_json(json["adScriptId"]) + if json.get("adScriptId", None) is not None + else None + )
+ + + +
+[docs] +def get_frame_tree() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, FrameTree]: + """ + Returns present frame tree structure. + + :returns: Present frame tree structure. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.getFrameTree", + } + json = yield cmd_dict + return FrameTree.from_json(json["frameTree"])
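A sketch that lists every frame on the page, reusing the ``iter_frames`` helper sketched after ``FrameTree`` above; same ``tab`` assumption as the other sketches:

async def list_frames(tab) -> None:
    # tab: assumed connected nodriver Tab / Connection with an awaitable send().
    tree = await tab.send(get_frame_tree())
    for frame in iter_frames(tree):
        print(frame.id_, frame.url)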
+ + + +
+[docs] +def get_layout_metrics() -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + LayoutViewport, + VisualViewport, + dom.Rect, + LayoutViewport, + VisualViewport, + dom.Rect, + ], +]: + """ + Returns metrics relating to the layout of the page, such as viewport bounds/scale. + + :returns: A tuple with the following items: + + 0. **layoutViewport** - Deprecated metrics relating to the layout viewport. Is in device pixels. Use ``cssLayoutViewport`` instead. + 1. **visualViewport** - Deprecated metrics relating to the visual viewport. Is in device pixels. Use ``cssVisualViewport`` instead. + 2. **contentSize** - Deprecated size of scrollable area. Is in DP. Use ``cssContentSize`` instead. + 3. **cssLayoutViewport** - Metrics relating to the layout viewport in CSS pixels. + 4. **cssVisualViewport** - Metrics relating to the visual viewport in CSS pixels. + 5. **cssContentSize** - Size of scrollable area in CSS pixels. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.getLayoutMetrics", + } + json = yield cmd_dict + return ( + LayoutViewport.from_json(json["layoutViewport"]), + VisualViewport.from_json(json["visualViewport"]), + dom.Rect.from_json(json["contentSize"]), + LayoutViewport.from_json(json["cssLayoutViewport"]), + VisualViewport.from_json(json["cssVisualViewport"]), + dom.Rect.from_json(json["cssContentSize"]), + )
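The first three tuple entries are the deprecated device-pixel variants, so a consumer normally skips straight to the CSS-pixel ones. A sketch, same ``tab`` assumption as above:

async def print_viewport(tab) -> None:
    # tab: assumed connected nodriver Tab / Connection with an awaitable send().
    _, _, _, css_layout, css_visual, css_content = await tab.send(get_layout_metrics())
    print("viewport:", css_layout.client_width, "x", css_layout.client_height)
    print("scale:", css_visual.scale)
    print("scrollable area:", css_content.width, "x", css_content.height)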
+ + + +
+[docs] +def get_navigation_history() -> ( + typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[int, typing.List[NavigationEntry]] + ] +): + """ + Returns navigation history for the current page. + + :returns: A tuple with the following items: + + 0. **currentIndex** - Index of the current navigation history entry. + 1. **entries** - Array of navigation history entries. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.getNavigationHistory", + } + json = yield cmd_dict + return ( + int(json["currentIndex"]), + [NavigationEntry.from_json(i) for i in json["entries"]], + )
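Combined with ``navigate_to_history_entry`` below, this is enough to step through the tab's history. A sketch going back one entry (assuming, as defined earlier in this module, that ``NavigationEntry`` exposes its entry id as ``id_``; same ``tab`` assumption as above):

async def go_back_one(tab) -> None:
    # tab: assumed connected nodriver Tab / Connection with an awaitable send().
    current_index, entries = await tab.send(get_navigation_history())
    if current_index > 0:
        await tab.send(navigate_to_history_entry(entries[current_index - 1].id_))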
+ + + +
+[docs] +def reset_navigation_history() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Resets navigation history for the current page. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.resetNavigationHistory", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_resource_content( + frame_id: FrameId, url: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[str, bool]]: + """ + Returns content of the given resource. + + **EXPERIMENTAL** + + :param frame_id: Frame id to get resource for. + :param url: URL of the resource to get content for. + :returns: A tuple with the following items: + + 0. **content** - Resource content. + 1. **base64Encoded** - True, if content was served as base64. + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + params["url"] = url + cmd_dict: T_JSON_DICT = { + "method": "Page.getResourceContent", + "params": params, + } + json = yield cmd_dict + return (str(json["content"]), bool(json["base64Encoded"]))
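The second element of the returned tuple says whether the content arrived base64-encoded, so decoding has to be conditional. A sketch returning raw bytes either way, same ``tab`` assumption as above:

import base64

async def fetch_resource(tab, frame_id: FrameId, url: str) -> bytes:
    # tab: assumed connected nodriver Tab / Connection with an awaitable send().
    content, is_base64 = await tab.send(get_resource_content(frame_id, url))
    return base64.b64decode(content) if is_base64 else content.encode("utf-8")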
+ + + +
+[docs] +def get_resource_tree() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, FrameResourceTree] +): + """ + Returns present frame / resource tree structure. + + **EXPERIMENTAL** + + :returns: Present frame / resource tree structure. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.getResourceTree", + } + json = yield cmd_dict + return FrameResourceTree.from_json(json["frameTree"])
+ + + +
+[docs] +def handle_java_script_dialog( + accept: bool, prompt_text: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Accepts or dismisses a JavaScript initiated dialog (alert, confirm, prompt, or onbeforeunload). + + :param accept: Whether to accept or dismiss the dialog. + :param prompt_text: *(Optional)* The text to enter into the dialog prompt before accepting. Used only if this is a prompt dialog. + """ + params: T_JSON_DICT = dict() + params["accept"] = accept + if prompt_text is not None: + params["promptText"] = prompt_text + cmd_dict: T_JSON_DICT = { + "method": "Page.handleJavaScriptDialog", + "params": params, + } + json = yield cmd_dict
+ + + + + + + +
+[docs] +def navigate_to_history_entry( + entry_id: int, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Navigates current page to the given history entry. + + :param entry_id: Unique id of the entry to navigate to. + """ + params: T_JSON_DICT = dict() + params["entryId"] = entry_id + cmd_dict: T_JSON_DICT = { + "method": "Page.navigateToHistoryEntry", + "params": params, + } + json = yield cmd_dict
+ + + + + + + +
+[docs] +def reload( + ignore_cache: typing.Optional[bool] = None, + script_to_evaluate_on_load: typing.Optional[str] = None, + loader_id: typing.Optional[network.LoaderId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Reloads given page optionally ignoring the cache. + + :param ignore_cache: *(Optional)* If true, browser cache is ignored (as if the user pressed Shift+refresh). + :param script_to_evaluate_on_load: *(Optional)* If set, the script will be injected into all frames of the inspected page after reload. Argument will be ignored if reloading dataURL origin. + :param loader_id: **(EXPERIMENTAL)** *(Optional)* If set, an error will be thrown if the target page's main frame's loader id does not match the provided id. This prevents accidentally reloading an unintended target in case there's a racing navigation. + """ + params: T_JSON_DICT = dict() + if ignore_cache is not None: + params["ignoreCache"] = ignore_cache + if script_to_evaluate_on_load is not None: + params["scriptToEvaluateOnLoad"] = script_to_evaluate_on_load + if loader_id is not None: + params["loaderId"] = loader_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.reload", + "params": params, + } + json = yield cmd_dict
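Sketch of a hard reload that bypasses the cache and re-injects a marker script into every frame after the reload (same tab.send() assumption):

await tab.send(
    reload(
        ignore_cache=True,
        script_to_evaluate_on_load="window.__reloaded_by_cdp = true;",
    )
)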
+ + + +
+[docs] +@deprecated(version="1.3") +def remove_script_to_evaluate_on_load( + identifier: ScriptIdentifier, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Deprecated, please use removeScriptToEvaluateOnNewDocument instead. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param identifier: + """ + params: T_JSON_DICT = dict() + params["identifier"] = identifier.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.removeScriptToEvaluateOnLoad", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def remove_script_to_evaluate_on_new_document( + identifier: ScriptIdentifier, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes given script from the list. + + :param identifier: + """ + params: T_JSON_DICT = dict() + params["identifier"] = identifier.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.removeScriptToEvaluateOnNewDocument", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def screencast_frame_ack( + session_id: int, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Acknowledges that a screencast frame has been received by the frontend. + + **EXPERIMENTAL** + + :param session_id: Frame number. + """ + params: T_JSON_DICT = dict() + params["sessionId"] = session_id + cmd_dict: T_JSON_DICT = { + "method": "Page.screencastFrameAck", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def search_in_resource( + frame_id: FrameId, + url: str, + query: str, + case_sensitive: typing.Optional[bool] = None, + is_regex: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[debugger.SearchMatch]]: + """ + Searches for given string in resource content. + + **EXPERIMENTAL** + + :param frame_id: Frame id for resource to search in. + :param url: URL of the resource to search in. + :param query: String to search for. + :param case_sensitive: *(Optional)* If true, search is case sensitive. + :param is_regex: *(Optional)* If true, treats string parameter as regex. + :returns: List of search matches. + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + params["url"] = url + params["query"] = query + if case_sensitive is not None: + params["caseSensitive"] = case_sensitive + if is_regex is not None: + params["isRegex"] = is_regex + cmd_dict: T_JSON_DICT = { + "method": "Page.searchInResource", + "params": params, + } + json = yield cmd_dict + return [debugger.SearchMatch.from_json(i) for i in json["result"]]
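Sketch of a regex search over a script the frame has loaded (tab and frame assumed as before; debugger.SearchMatch is assumed to expose line_number and line_content per the CDP spec):

matches = await tab.send(
    search_in_resource(
        frame_id=frame.id_,
        url="https://example.com/static/main.js",
        query=r"TODO\(.*?\)",
        is_regex=True,
    )
)
for match in matches:
    print(int(match.line_number), match.line_content.strip())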
+ + + +
+[docs] +def set_ad_blocking_enabled( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable Chrome's experimental ad filter on all sites. + + **EXPERIMENTAL** + + :param enabled: Whether to block ads. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Page.setAdBlockingEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_bypass_csp(enabled: bool) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable page Content Security Policy by-passing. + + :param enabled: Whether to bypass page CSP. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Page.setBypassCSP", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_permissions_policy_state( + frame_id: FrameId, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.List[PermissionsPolicyFeatureState] +]: + """ + Get Permissions Policy state on given frame. + + **EXPERIMENTAL** + + :param frame_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.getPermissionsPolicyState", + "params": params, + } + json = yield cmd_dict + return [PermissionsPolicyFeatureState.from_json(i) for i in json["states"]]
+ + + +
+[docs] +def get_origin_trials( + frame_id: FrameId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[OriginTrial]]: + """ + Get Origin Trials on given frame. + + **EXPERIMENTAL** + + :param frame_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.getOriginTrials", + "params": params, + } + json = yield cmd_dict + return [OriginTrial.from_json(i) for i in json["originTrials"]]
+ + + +
+[docs] +@deprecated(version="1.3") +def set_device_metrics_override( + width: int, + height: int, + device_scale_factor: float, + mobile: bool, + scale: typing.Optional[float] = None, + screen_width: typing.Optional[int] = None, + screen_height: typing.Optional[int] = None, + position_x: typing.Optional[int] = None, + position_y: typing.Optional[int] = None, + dont_set_visible_size: typing.Optional[bool] = None, + screen_orientation: typing.Optional[emulation.ScreenOrientation] = None, + viewport: typing.Optional[Viewport] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides the values of device screen dimensions (window.screen.width, window.screen.height, + window.innerWidth, window.innerHeight, and "device-width"/"device-height"-related CSS media + query results). + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param width: Overriding width value in pixels (minimum 0, maximum 10000000). 0 disables the override. + :param height: Overriding height value in pixels (minimum 0, maximum 10000000). 0 disables the override. + :param device_scale_factor: Overriding device scale factor value. 0 disables the override. + :param mobile: Whether to emulate mobile device. This includes viewport meta tag, overlay scrollbars, text autosizing and more. + :param scale: *(Optional)* Scale to apply to resulting view image. + :param screen_width: *(Optional)* Overriding screen width value in pixels (minimum 0, maximum 10000000). + :param screen_height: *(Optional)* Overriding screen height value in pixels (minimum 0, maximum 10000000). + :param position_x: *(Optional)* Overriding view X position on screen in pixels (minimum 0, maximum 10000000). + :param position_y: *(Optional)* Overriding view Y position on screen in pixels (minimum 0, maximum 10000000). + :param dont_set_visible_size: *(Optional)* Do not set visible view size, rely upon explicit setVisibleSize call. + :param screen_orientation: *(Optional)* Screen orientation override. + :param viewport: *(Optional)* The viewport dimensions and scale. If not set, the override is cleared. + """ + params: T_JSON_DICT = dict() + params["width"] = width + params["height"] = height + params["deviceScaleFactor"] = device_scale_factor + params["mobile"] = mobile + if scale is not None: + params["scale"] = scale + if screen_width is not None: + params["screenWidth"] = screen_width + if screen_height is not None: + params["screenHeight"] = screen_height + if position_x is not None: + params["positionX"] = position_x + if position_y is not None: + params["positionY"] = position_y + if dont_set_visible_size is not None: + params["dontSetVisibleSize"] = dont_set_visible_size + if screen_orientation is not None: + params["screenOrientation"] = screen_orientation.to_json() + if viewport is not None: + params["viewport"] = viewport.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.setDeviceMetricsOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_device_orientation_override( + alpha: float, beta: float, gamma: float +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides the Device Orientation. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param alpha: Mock alpha + :param beta: Mock beta + :param gamma: Mock gamma + """ + params: T_JSON_DICT = dict() + params["alpha"] = alpha + params["beta"] = beta + params["gamma"] = gamma + cmd_dict: T_JSON_DICT = { + "method": "Page.setDeviceOrientationOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_font_families( + font_families: FontFamilies, + for_scripts: typing.Optional[typing.List[ScriptFontFamilies]] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set generic font families. + + **EXPERIMENTAL** + + :param font_families: Specifies font families to set. If a font family is not specified, it won't be changed. + :param for_scripts: *(Optional)* Specifies font families to set for individual scripts. + """ + params: T_JSON_DICT = dict() + params["fontFamilies"] = font_families.to_json() + if for_scripts is not None: + params["forScripts"] = [i.to_json() for i in for_scripts] + cmd_dict: T_JSON_DICT = { + "method": "Page.setFontFamilies", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_font_sizes( + font_sizes: FontSizes, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set default font sizes. + + **EXPERIMENTAL** + + :param font_sizes: Specifies font sizes to set. If a font size is not specified, it won't be changed. + """ + params: T_JSON_DICT = dict() + params["fontSizes"] = font_sizes.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.setFontSizes", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_document_content( + frame_id: FrameId, html: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets given markup as the document's HTML. + + :param frame_id: Frame id to set HTML for. + :param html: HTML content to set. + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + params["html"] = html + cmd_dict: T_JSON_DICT = { + "method": "Page.setDocumentContent", + "params": params, + } + json = yield cmd_dict
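Sketch of replacing the main frame's markup wholesale, for example to render a static template before taking a screenshot (tab.send() assumed; the root frame id comes from get_frame_tree() above):

frame_tree = await tab.send(get_frame_tree())
await tab.send(
    set_document_content(
        frame_id=frame_tree.frame.id_,
        html="<html><body><h1>Rendered via Page.setDocumentContent</h1></body></html>",
    )
)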
+ + + +
+[docs] +@deprecated(version="1.3") +def set_download_behavior( + behavior: str, download_path: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set the behavior when downloading a file. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param behavior: Whether to allow all or deny all download requests, or use default Chrome behavior if available (otherwise deny). + :param download_path: *(Optional)* The default path to save downloaded files to. This is required if behavior is set to 'allow' + """ + params: T_JSON_DICT = dict() + params["behavior"] = behavior + if download_path is not None: + params["downloadPath"] = download_path + cmd_dict: T_JSON_DICT = { + "method": "Page.setDownloadBehavior", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_geolocation_override( + latitude: typing.Optional[float] = None, + longitude: typing.Optional[float] = None, + accuracy: typing.Optional[float] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Overrides the Geolocation Position or Error. Omitting any of the parameters emulates position + unavailable. + + .. deprecated:: 1.3 + + :param latitude: *(Optional)* Mock latitude + :param longitude: *(Optional)* Mock longitude + :param accuracy: *(Optional)* Mock accuracy + """ + params: T_JSON_DICT = dict() + if latitude is not None: + params["latitude"] = latitude + if longitude is not None: + params["longitude"] = longitude + if accuracy is not None: + params["accuracy"] = accuracy + cmd_dict: T_JSON_DICT = { + "method": "Page.setGeolocationOverride", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_lifecycle_events_enabled( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Controls whether page will emit lifecycle events. + + :param enabled: If true, starts emitting lifecycle events. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Page.setLifecycleEventsEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_touch_emulation_enabled( + enabled: bool, configuration: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Toggles mouse event-based touch event emulation. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param enabled: Whether the touch event emulation should be enabled. + :param configuration: *(Optional)* Touch/gesture events configuration. Default: current platform. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + if configuration is not None: + params["configuration"] = configuration + cmd_dict: T_JSON_DICT = { + "method": "Page.setTouchEmulationEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def start_screencast( + format_: typing.Optional[str] = None, + quality: typing.Optional[int] = None, + max_width: typing.Optional[int] = None, + max_height: typing.Optional[int] = None, + every_nth_frame: typing.Optional[int] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Starts sending each frame using the ``screencastFrame`` event. + + **EXPERIMENTAL** + + :param format_: *(Optional)* Image compression format. + :param quality: *(Optional)* Compression quality from range [0..100]. + :param max_width: *(Optional)* Maximum screenshot width. + :param max_height: *(Optional)* Maximum screenshot height. + :param every_nth_frame: *(Optional)* Send every n-th frame. + """ + params: T_JSON_DICT = dict() + if format_ is not None: + params["format"] = format_ + if quality is not None: + params["quality"] = quality + if max_width is not None: + params["maxWidth"] = max_width + if max_height is not None: + params["maxHeight"] = max_height + if every_nth_frame is not None: + params["everyNthFrame"] = every_nth_frame + cmd_dict: T_JSON_DICT = { + "method": "Page.startScreencast", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_loading() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Force the page stop all navigations and pending resource fetches. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.stopLoading", + } + json = yield cmd_dict
+ + + +
+[docs] +def crash() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Crashes renderer on the IO thread, generates minidumps. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.crash", + } + json = yield cmd_dict
+ + + +
+[docs] +def close() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Tries to close page, running its beforeunload hooks, if any. + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.close", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_web_lifecycle_state( + state: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Tries to update the web lifecycle state of the page. + It will transition the page to the given state according to: + https://github.com/WICG/web-lifecycle/ + + **EXPERIMENTAL** + + :param state: Target lifecycle state + """ + params: T_JSON_DICT = dict() + params["state"] = state + cmd_dict: T_JSON_DICT = { + "method": "Page.setWebLifecycleState", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_screencast() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Stops sending each frame in the ``screencastFrame``. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.stopScreencast", + } + json = yield cmd_dict
+ + + +
+[docs] +def produce_compilation_cache( + scripts: typing.List[CompilationCacheParams], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Requests backend to produce compilation cache for the specified scripts. + ``scripts`` are appended to the list of scripts for which the cache + would be produced. The list may be reset during page navigation. + When script with a matching URL is encountered, the cache is optionally + produced upon backend discretion, based on internal heuristics. + See also: ``Page.compilationCacheProduced``. + + **EXPERIMENTAL** + + :param scripts: + """ + params: T_JSON_DICT = dict() + params["scripts"] = [i.to_json() for i in scripts] + cmd_dict: T_JSON_DICT = { + "method": "Page.produceCompilationCache", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def add_compilation_cache( + url: str, data: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Seeds compilation cache for given url. Compilation cache does not survive + cross-process navigation. + + **EXPERIMENTAL** + + :param url: + :param data: Base64-encoded data (Encoded as a base64 string when passed over JSON) + """ + params: T_JSON_DICT = dict() + params["url"] = url + params["data"] = data + cmd_dict: T_JSON_DICT = { + "method": "Page.addCompilationCache", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_compilation_cache() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears seeded compilation cache. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.clearCompilationCache", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_spc_transaction_mode( + mode: AutoResponseMode, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets the Secure Payment Confirmation transaction mode. + https://w3c.github.io/secure-payment-confirmation/#sctn-automation-set-spc-transaction-mode + + **EXPERIMENTAL** + + :param mode: + """ + params: T_JSON_DICT = dict() + params["mode"] = mode.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.setSPCTransactionMode", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_rph_registration_mode( + mode: AutoResponseMode, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Extensions for Custom Handlers API: + https://html.spec.whatwg.org/multipage/system-state.html#rph-automation + + **EXPERIMENTAL** + + :param mode: + """ + params: T_JSON_DICT = dict() + params["mode"] = mode.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Page.setRPHRegistrationMode", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def generate_test_report( + message: str, group: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Generates a report for testing. + + **EXPERIMENTAL** + + :param message: Message to be displayed in the report. + :param group: *(Optional)* Specifies the endpoint group to deliver the report to. + """ + params: T_JSON_DICT = dict() + params["message"] = message + if group is not None: + params["group"] = group + cmd_dict: T_JSON_DICT = { + "method": "Page.generateTestReport", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def wait_for_debugger() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Pauses page execution. Can be resumed using generic Runtime.runIfWaitingForDebugger. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Page.waitForDebugger", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_intercept_file_chooser_dialog( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Intercept file chooser requests and transfer control to protocol clients. + When file chooser interception is enabled, native file chooser dialog is not shown. + Instead, a protocol event ``Page.fileChooserOpened`` is emitted. + + :param enabled: + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Page.setInterceptFileChooserDialog", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_prerendering_allowed( + is_allowed: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable/disable prerendering manually. + + This command is a short-term solution for https://crbug.com/1440085. + See https://docs.google.com/document/d/12HVmFxYj5Jc-eJr5OmWsa2bqTJsbgGLKI6ZIyx0_wpA + for more details. + + TODO(https://crbug.com/1440085): Remove this once Puppeteer supports tab targets. + + **EXPERIMENTAL** + + :param is_allowed: + """ + params: T_JSON_DICT = dict() + params["isAllowed"] = is_allowed + cmd_dict: T_JSON_DICT = { + "method": "Page.setPrerenderingAllowed", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Page.domContentEventFired") +@dataclass +class DomContentEventFired: + timestamp: network.MonotonicTime + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DomContentEventFired: + return cls(timestamp=network.MonotonicTime.from_json(json["timestamp"]))
+ + + +
+[docs] +@event_class("Page.fileChooserOpened") +@dataclass +class FileChooserOpened: + """ + Emitted only when ``page.interceptFileChooser`` is enabled. + """ + + #: Id of the frame containing input node. + frame_id: FrameId + #: Input mode. + mode: str + #: Input node id. Only present for file choosers opened via an ``<input type="file">`` element. + backend_node_id: typing.Optional[dom.BackendNodeId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FileChooserOpened: + return cls( + frame_id=FrameId.from_json(json["frameId"]), + mode=str(json["mode"]), + backend_node_id=( + dom.BackendNodeId.from_json(json["backendNodeId"]) + if json.get("backendNodeId", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Page.frameAttached") +@dataclass +class FrameAttached: + """ + Fired when frame has been attached to its parent. + """ + + #: Id of the frame that has been attached. + frame_id: FrameId + #: Parent frame identifier. + parent_frame_id: FrameId + #: JavaScript stack trace of when frame was attached, only set if frame initiated from script. + stack: typing.Optional[runtime.StackTrace] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameAttached: + return cls( + frame_id=FrameId.from_json(json["frameId"]), + parent_frame_id=FrameId.from_json(json["parentFrameId"]), + stack=( + runtime.StackTrace.from_json(json["stack"]) + if json.get("stack", None) is not None + else None + ), + )
+ + + +
+[docs] +@deprecated(version="1.3") +@event_class("Page.frameClearedScheduledNavigation") +@dataclass +class FrameClearedScheduledNavigation: + """ + Fired when frame no longer has a scheduled navigation. + """ + + #: Id of the frame that has cleared its scheduled navigation. + frame_id: FrameId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameClearedScheduledNavigation: + return cls(frame_id=FrameId.from_json(json["frameId"]))
+ + + +
+[docs] +@event_class("Page.frameDetached") +@dataclass +class FrameDetached: + """ + Fired when frame has been detached from its parent. + """ + + #: Id of the frame that has been detached. + frame_id: FrameId + reason: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameDetached: + return cls( + frame_id=FrameId.from_json(json["frameId"]), reason=str(json["reason"]) + )
+ + + +
+[docs] +@event_class("Page.frameNavigated") +@dataclass +class FrameNavigated: + """ + Fired once navigation of the frame has completed. Frame is now associated with the new loader. + """ + + #: Frame object. + frame: Frame + type_: NavigationType + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameNavigated: + return cls( + frame=Frame.from_json(json["frame"]), + type_=NavigationType.from_json(json["type"]), + )
+ + + +
+[docs] +@event_class("Page.documentOpened") +@dataclass +class DocumentOpened: + """ + **EXPERIMENTAL** + + Fired when opening document to write to. + """ + + #: Frame object. + frame: Frame + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DocumentOpened: + return cls(frame=Frame.from_json(json["frame"]))
+ + + +
+[docs] +@event_class("Page.frameResized") +@dataclass +class FrameResized: + """ + **EXPERIMENTAL** + + + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameResized: + return cls()
+ + + +
+[docs] +@event_class("Page.frameRequestedNavigation") +@dataclass +class FrameRequestedNavigation: + """ + **EXPERIMENTAL** + + Fired when a renderer-initiated navigation is requested. + Navigation may still be cancelled after the event is issued. + """ + + #: Id of the frame that is being navigated. + frame_id: FrameId + #: The reason for the navigation. + reason: ClientNavigationReason + #: The destination URL for the requested navigation. + url: str + #: The disposition for the navigation. + disposition: ClientNavigationDisposition + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameRequestedNavigation: + return cls( + frame_id=FrameId.from_json(json["frameId"]), + reason=ClientNavigationReason.from_json(json["reason"]), + url=str(json["url"]), + disposition=ClientNavigationDisposition.from_json(json["disposition"]), + )
+ + + +
+[docs] +@deprecated(version="1.3") +@event_class("Page.frameScheduledNavigation") +@dataclass +class FrameScheduledNavigation: + """ + Fired when frame schedules a potential navigation. + """ + + #: Id of the frame that has scheduled a navigation. + frame_id: FrameId + #: Delay (in seconds) until the navigation is scheduled to begin. The navigation is not + #: guaranteed to start. + delay: float + #: The reason for the navigation. + reason: ClientNavigationReason + #: The destination URL for the scheduled navigation. + url: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameScheduledNavigation: + return cls( + frame_id=FrameId.from_json(json["frameId"]), + delay=float(json["delay"]), + reason=ClientNavigationReason.from_json(json["reason"]), + url=str(json["url"]), + )
+ + + +
+[docs] +@event_class("Page.frameStartedLoading") +@dataclass +class FrameStartedLoading: + """ + **EXPERIMENTAL** + + Fired when frame has started loading. + """ + + #: Id of the frame that has started loading. + frame_id: FrameId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameStartedLoading: + return cls(frame_id=FrameId.from_json(json["frameId"]))
+ + + +
+[docs] +@event_class("Page.frameStoppedLoading") +@dataclass +class FrameStoppedLoading: + """ + **EXPERIMENTAL** + + Fired when frame has stopped loading. + """ + + #: Id of the frame that has stopped loading. + frame_id: FrameId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FrameStoppedLoading: + return cls(frame_id=FrameId.from_json(json["frameId"]))
+ + + +
+[docs] +@deprecated(version="1.3") +@event_class("Page.downloadWillBegin") +@dataclass +class DownloadWillBegin: + """ + **EXPERIMENTAL** + + Fired when page is about to start a download. + Deprecated. Use Browser.downloadWillBegin instead. + """ + + #: Id of the frame that caused download to begin. + frame_id: FrameId + #: Global unique identifier of the download. + guid: str + #: URL of the resource being downloaded. + url: str + #: Suggested file name of the resource (the actual name of the file saved on disk may differ). + suggested_filename: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DownloadWillBegin: + return cls( + frame_id=FrameId.from_json(json["frameId"]), + guid=str(json["guid"]), + url=str(json["url"]), + suggested_filename=str(json["suggestedFilename"]), + )
+ + + +
+[docs] +@deprecated(version="1.3") +@event_class("Page.downloadProgress") +@dataclass +class DownloadProgress: + """ + **EXPERIMENTAL** + + Fired when download makes progress. Last call has ``done`` == true. + Deprecated. Use Browser.downloadProgress instead. + """ + + #: Global unique identifier of the download. + guid: str + #: Total expected bytes to download. + total_bytes: float + #: Total bytes received. + received_bytes: float + #: Download status. + state: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DownloadProgress: + return cls( + guid=str(json["guid"]), + total_bytes=float(json["totalBytes"]), + received_bytes=float(json["receivedBytes"]), + state=str(json["state"]), + )
+ + + +
+[docs] +@event_class("Page.interstitialHidden") +@dataclass +class InterstitialHidden: + """ + Fired when interstitial page was hidden + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InterstitialHidden: + return cls()
+ + + +
+[docs] +@event_class("Page.interstitialShown") +@dataclass +class InterstitialShown: + """ + Fired when interstitial page was shown + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InterstitialShown: + return cls()
+ + + +
+[docs] +@event_class("Page.javascriptDialogClosed") +@dataclass +class JavascriptDialogClosed: + """ + Fired when a JavaScript initiated dialog (alert, confirm, prompt, or onbeforeunload) has been + closed. + """ + + #: Whether dialog was confirmed. + result: bool + #: User input in case of prompt. + user_input: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> JavascriptDialogClosed: + return cls(result=bool(json["result"]), user_input=str(json["userInput"]))
+ + + +
+[docs] +@event_class("Page.javascriptDialogOpening") +@dataclass +class JavascriptDialogOpening: + """ + Fired when a JavaScript initiated dialog (alert, confirm, prompt, or onbeforeunload) is about to + open. + """ + + #: Frame url. + url: str + #: Message that will be displayed by the dialog. + message: str + #: Dialog type. + type_: DialogType + #: True iff browser is capable showing or acting on the given dialog. When browser has no + #: dialog handler for given target, calling alert while Page domain is engaged will stall + #: the page execution. Execution can be resumed via calling Page.handleJavaScriptDialog. + has_browser_handler: bool + #: Default dialog prompt. + default_prompt: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> JavascriptDialogOpening: + return cls( + url=str(json["url"]), + message=str(json["message"]), + type_=DialogType.from_json(json["type"]), + has_browser_handler=bool(json["hasBrowserHandler"]), + default_prompt=( + str(json["defaultPrompt"]) + if json.get("defaultPrompt", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Page.lifecycleEvent") +@dataclass +class LifecycleEvent: + """ + Fired for top level page lifecycle events such as navigation, load, paint, etc. + """ + + #: Id of the frame. + frame_id: FrameId + #: Loader identifier. Empty string if the request is fetched from worker. + loader_id: network.LoaderId + name: str + timestamp: network.MonotonicTime + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LifecycleEvent: + return cls( + frame_id=FrameId.from_json(json["frameId"]), + loader_id=network.LoaderId.from_json(json["loaderId"]), + name=str(json["name"]), + timestamp=network.MonotonicTime.from_json(json["timestamp"]), + )
+ + + +
+[docs] +@event_class("Page.backForwardCacheNotUsed") +@dataclass +class BackForwardCacheNotUsed: + """ + **EXPERIMENTAL** + + Fired for failed bfcache history navigations if BackForwardCache feature is enabled. Do + not assume any ordering with the Page.frameNavigated event. This event is fired only for + main-frame history navigation where the document changes (non-same-document navigations), + when bfcache navigation fails. + """ + + #: The loader id for the associated navigation. + loader_id: network.LoaderId + #: The frame id of the associated frame. + frame_id: FrameId + #: Array of reasons why the page could not be cached. This must not be empty. + not_restored_explanations: typing.List[BackForwardCacheNotRestoredExplanation] + #: Tree structure of reasons why the page could not be cached for each frame. + not_restored_explanations_tree: typing.Optional[ + BackForwardCacheNotRestoredExplanationTree + ] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BackForwardCacheNotUsed: + return cls( + loader_id=network.LoaderId.from_json(json["loaderId"]), + frame_id=FrameId.from_json(json["frameId"]), + not_restored_explanations=[ + BackForwardCacheNotRestoredExplanation.from_json(i) + for i in json["notRestoredExplanations"] + ], + not_restored_explanations_tree=( + BackForwardCacheNotRestoredExplanationTree.from_json( + json["notRestoredExplanationsTree"] + ) + if json.get("notRestoredExplanationsTree", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Page.loadEventFired") +@dataclass +class LoadEventFired: + timestamp: network.MonotonicTime + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LoadEventFired: + return cls(timestamp=network.MonotonicTime.from_json(json["timestamp"]))
+ + + + + + + +
+[docs] +@event_class("Page.screencastFrame") +@dataclass +class ScreencastFrame: + """ + **EXPERIMENTAL** + + Compressed image data requested by the ``startScreencast``. + """ + + #: Base64-encoded compressed image. (Encoded as a base64 string when passed over JSON) + data: str + #: Screencast frame metadata. + metadata: ScreencastFrameMetadata + #: Frame number. + session_id: int + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScreencastFrame: + return cls( + data=str(json["data"]), + metadata=ScreencastFrameMetadata.from_json(json["metadata"]), + session_id=int(json["sessionId"]), + )
+ + + +
+[docs] +@event_class("Page.screencastVisibilityChanged") +@dataclass +class ScreencastVisibilityChanged: + """ + **EXPERIMENTAL** + + Fired when the page with currently enabled screencast was shown or hidden . + """ + + #: True if the page is visible. + visible: bool + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScreencastVisibilityChanged: + return cls(visible=bool(json["visible"]))
+ + + +
+[docs] +@event_class("Page.windowOpen") +@dataclass +class WindowOpen: + """ + Fired when a new window is going to be opened, via window.open(), link click, form submission, + etc. + """ + + #: The URL for the new window. + url: str + #: Window name. + window_name: str + #: An array of enabled window features. + window_features: typing.List[str] + #: Whether or not it was triggered by user gesture. + user_gesture: bool + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WindowOpen: + return cls( + url=str(json["url"]), + window_name=str(json["windowName"]), + window_features=[str(i) for i in json["windowFeatures"]], + user_gesture=bool(json["userGesture"]), + )
+ + + +
+[docs] +@event_class("Page.compilationCacheProduced") +@dataclass +class CompilationCacheProduced: + """ + **EXPERIMENTAL** + + Issued for every compilation cache generated. Is only available + if Page.setGenerateCompilationCache is enabled. + """ + + url: str + #: Base64-encoded data (Encoded as a base64 string when passed over JSON) + data: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CompilationCacheProduced: + return cls(url=str(json["url"]), data=str(json["data"]))
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/performance.html b/docs/_build/html/_modules/nodriver/cdp/performance.html
new file mode 100644
index 0000000..37c8b3e
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/performance.html
@@ -0,0 +1,446 @@
Source code for nodriver.cdp.performance

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Performance
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +@dataclass +class Metric: + """ + Run-time execution metric. + """ + + #: Metric name. + name: str + + #: Metric value. + value: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Metric: + return cls( + name=str(json["name"]), + value=float(json["value"]), + )
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disable collecting and reporting metrics. + """ + cmd_dict: T_JSON_DICT = { + "method": "Performance.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable( + time_domain: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable collecting and reporting metrics. + + :param time_domain: *(Optional)* Time domain to use for collecting and reporting duration metrics. + """ + params: T_JSON_DICT = dict() + if time_domain is not None: + params["timeDomain"] = time_domain + cmd_dict: T_JSON_DICT = { + "method": "Performance.enable", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_time_domain( + time_domain: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets time domain to use for collecting and reporting duration metrics. + Note that this must be called before enabling metrics collection. Calling + this method while metrics collection is enabled returns an error. + + .. deprecated:: 1.3 + + **EXPERIMENTAL** + + :param time_domain: Time domain + """ + params: T_JSON_DICT = dict() + params["timeDomain"] = time_domain + cmd_dict: T_JSON_DICT = { + "method": "Performance.setTimeDomain", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_metrics() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Metric]]: + """ + Retrieve current values of run-time metrics. + + :returns: Current values for run-time metrics. + """ + cmd_dict: T_JSON_DICT = { + "method": "Performance.getMetrics", + } + json = yield cmd_dict + return [Metric.from_json(i) for i in json["metrics"]]
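Sketch of a simple measurement pass using this module's enable()/get_metrics() (tab.send() assumed as in the other examples; tab.get() is assumed to be nodriver's navigation helper, and "JSHeapUsedSize" is one of the metric names Chrome reports):

await tab.send(enable(time_domain="timeTicks"))
await tab.get("https://example.com")
metrics = await tab.send(get_metrics())
by_name = {m.name: m.value for m in metrics}
print("JS heap used (bytes):", by_name.get("JSHeapUsedSize"))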
+ + + +
+[docs] +@event_class("Performance.metrics") +@dataclass +class Metrics: + """ + Current values of the metrics. + """ + + #: Current values of the metrics. + metrics: typing.List[Metric] + #: Timestamp title. + title: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Metrics: + return cls( + metrics=[Metric.from_json(i) for i in json["metrics"]], + title=str(json["title"]), + )
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/performance_timeline.html b/docs/_build/html/_modules/nodriver/cdp/performance_timeline.html
new file mode 100644
index 0000000..a2f856d
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/performance_timeline.html
@@ -0,0 +1,545 @@
Source code for nodriver.cdp.performance_timeline

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: PerformanceTimeline (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import network
+from . import page
+
+
+
+[docs] +@dataclass +class LargestContentfulPaint: + """ + See https://github.com/WICG/LargestContentfulPaint and largest_contentful_paint.idl + """ + + render_time: network.TimeSinceEpoch + + load_time: network.TimeSinceEpoch + + #: The number of pixels being painted. + size: float + + #: The id attribute of the element, if available. + element_id: typing.Optional[str] = None + + #: The URL of the image (may be trimmed). + url: typing.Optional[str] = None + + node_id: typing.Optional[dom.BackendNodeId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["renderTime"] = self.render_time.to_json() + json["loadTime"] = self.load_time.to_json() + json["size"] = self.size + if self.element_id is not None: + json["elementId"] = self.element_id + if self.url is not None: + json["url"] = self.url + if self.node_id is not None: + json["nodeId"] = self.node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LargestContentfulPaint: + return cls( + render_time=network.TimeSinceEpoch.from_json(json["renderTime"]), + load_time=network.TimeSinceEpoch.from_json(json["loadTime"]), + size=float(json["size"]), + element_id=( + str(json["elementId"]) + if json.get("elementId", None) is not None + else None + ), + url=str(json["url"]) if json.get("url", None) is not None else None, + node_id=( + dom.BackendNodeId.from_json(json["nodeId"]) + if json.get("nodeId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class LayoutShiftAttribution: + previous_rect: dom.Rect + + current_rect: dom.Rect + + node_id: typing.Optional[dom.BackendNodeId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["previousRect"] = self.previous_rect.to_json() + json["currentRect"] = self.current_rect.to_json() + if self.node_id is not None: + json["nodeId"] = self.node_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LayoutShiftAttribution: + return cls( + previous_rect=dom.Rect.from_json(json["previousRect"]), + current_rect=dom.Rect.from_json(json["currentRect"]), + node_id=( + dom.BackendNodeId.from_json(json["nodeId"]) + if json.get("nodeId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class LayoutShift: + """ + See https://wicg.github.io/layout-instability/#sec-layout-shift and layout_shift.idl + """ + + #: Score increment produced by this event. + value: float + + had_recent_input: bool + + last_input_time: network.TimeSinceEpoch + + sources: typing.List[LayoutShiftAttribution] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["value"] = self.value + json["hadRecentInput"] = self.had_recent_input + json["lastInputTime"] = self.last_input_time.to_json() + json["sources"] = [i.to_json() for i in self.sources] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> LayoutShift: + return cls( + value=float(json["value"]), + had_recent_input=bool(json["hadRecentInput"]), + last_input_time=network.TimeSinceEpoch.from_json(json["lastInputTime"]), + sources=[LayoutShiftAttribution.from_json(i) for i in json["sources"]], + )
+ + + +
+[docs] +@dataclass +class TimelineEvent: + #: Identifies the frame that this event is related to. Empty for non-frame targets. + frame_id: page.FrameId + + #: The event type, as specified in https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype + #: This determines which of the optional "details" fields is present. + type_: str + + #: Name may be empty depending on the type. + name: str + + #: Time in seconds since Epoch, monotonically increasing within document lifetime. + time: network.TimeSinceEpoch + + #: Event duration, if applicable. + duration: typing.Optional[float] = None + + lcp_details: typing.Optional[LargestContentfulPaint] = None + + layout_shift_details: typing.Optional[LayoutShift] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["frameId"] = self.frame_id.to_json() + json["type"] = self.type_ + json["name"] = self.name + json["time"] = self.time.to_json() + if self.duration is not None: + json["duration"] = self.duration + if self.lcp_details is not None: + json["lcpDetails"] = self.lcp_details.to_json() + if self.layout_shift_details is not None: + json["layoutShiftDetails"] = self.layout_shift_details.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TimelineEvent: + return cls( + frame_id=page.FrameId.from_json(json["frameId"]), + type_=str(json["type"]), + name=str(json["name"]), + time=network.TimeSinceEpoch.from_json(json["time"]), + duration=( + float(json["duration"]) + if json.get("duration", None) is not None + else None + ), + lcp_details=( + LargestContentfulPaint.from_json(json["lcpDetails"]) + if json.get("lcpDetails", None) is not None + else None + ), + layout_shift_details=( + LayoutShift.from_json(json["layoutShiftDetails"]) + if json.get("layoutShiftDetails", None) is not None + else None + ), + )
+ + + +
+[docs] +def enable( + event_types: typing.List[str], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Previously buffered events would be reported before method returns. + See also: timelineEventAdded + + :param event_types: The types of event to report, as specified in https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype The specified filter overrides any previous filters, passing empty filter disables recording. Note that not all types exposed to the web platform are currently supported. + """ + params: T_JSON_DICT = dict() + params["eventTypes"] = [i for i in event_types] + cmd_dict: T_JSON_DICT = { + "method": "PerformanceTimeline.enable", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("PerformanceTimeline.timelineEventAdded") +@dataclass +class TimelineEventAdded: + """ + Sent when a performance timeline event is added. See reportPerformanceTimeline method. + """ + + event: TimelineEvent + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TimelineEventAdded: + return cls(event=TimelineEvent.from_json(json["event"]))
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/preload.html b/docs/_build/html/_modules/nodriver/cdp/preload.html
new file mode 100644
index 0000000..4a9efaf
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/preload.html
@@ -0,0 +1,984 @@
Source code for nodriver.cdp.preload

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Preload (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import dom
+from . import network
+from . import page
+
+
+
+[docs] +class RuleSetId(str): + """ + Unique id + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> RuleSetId: + return cls(json) + + def __repr__(self): + return "RuleSetId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class RuleSet: + """ + Corresponds to SpeculationRuleSet + """ + + id_: RuleSetId + + #: Identifies a document which the rule set is associated with. + loader_id: network.LoaderId + + #: Source text of JSON representing the rule set. If it comes from + #: ``<script>`` tag, it is the textContent of the node. Note that it is + #: a JSON for valid case. + #: + #: See also: + #: - https://wicg.github.io/nav-speculation/speculation-rules.html + #: - https://github.com/WICG/nav-speculation/blob/main/triggers.md + source_text: str + + #: A speculation rule set is either added through an inline + #: ``<script>`` tag or through an external resource via the + #: 'Speculation-Rules' HTTP header. For the first case, we include + #: the BackendNodeId of the relevant ``<script>`` tag. For the second + #: case, we include the external URL where the rule set was loaded + #: from, and also RequestId if Network domain is enabled. + #: + #: See also: + #: - https://wicg.github.io/nav-speculation/speculation-rules.html#speculation-rules-script + #: - https://wicg.github.io/nav-speculation/speculation-rules.html#speculation-rules-header + backend_node_id: typing.Optional[dom.BackendNodeId] = None + + url: typing.Optional[str] = None + + request_id: typing.Optional[network.RequestId] = None + + #: Error information + #: ``errorMessage`` is null iff ``errorType`` is null. + error_type: typing.Optional[RuleSetErrorType] = None + + #: TODO(https://crbug.com/1425354): Replace this property with structured error. + error_message: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_.to_json() + json["loaderId"] = self.loader_id.to_json() + json["sourceText"] = self.source_text + if self.backend_node_id is not None: + json["backendNodeId"] = self.backend_node_id.to_json() + if self.url is not None: + json["url"] = self.url + if self.request_id is not None: + json["requestId"] = self.request_id.to_json() + if self.error_type is not None: + json["errorType"] = self.error_type.to_json() + if self.error_message is not None: + json["errorMessage"] = self.error_message + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RuleSet: + return cls( + id_=RuleSetId.from_json(json["id"]), + loader_id=network.LoaderId.from_json(json["loaderId"]), + source_text=str(json["sourceText"]), + backend_node_id=( + dom.BackendNodeId.from_json(json["backendNodeId"]) + if json.get("backendNodeId", None) is not None + else None + ), + url=str(json["url"]) if json.get("url", None) is not None else None, + request_id=( + network.RequestId.from_json(json["requestId"]) + if json.get("requestId", None) is not None + else None + ), + error_type=( + RuleSetErrorType.from_json(json["errorType"]) + if json.get("errorType", None) is not None + else None + ), + error_message=( + str(json["errorMessage"]) + if json.get("errorMessage", None) is not None + else None + ), + )
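For reference, a RuleSet round-trips through from_json()/to_json() like every other type in these generated modules; the payload below is made up for illustration, not captured from a real session:

sample = {
    "id": "ruleset-1",
    "loaderId": "LOADER-1",
    "sourceText": '{"prerender": [{"source": "list", "urls": ["/next"]}]}',
}
rule_set = RuleSet.from_json(sample)
assert rule_set.id_ == RuleSetId("ruleset-1")
assert rule_set.error_type is None   # optional fields default to None
assert rule_set.to_json() == sample  # only the keys that were set are emitted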
+ + + +
+[docs] +class RuleSetErrorType(enum.Enum): + SOURCE_IS_NOT_JSON_OBJECT = "SourceIsNotJsonObject" + INVALID_RULES_SKIPPED = "InvalidRulesSkipped" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> RuleSetErrorType: + return cls(json)
+ + + +
+[docs] +class SpeculationAction(enum.Enum): + """ + The type of preloading attempted. It corresponds to + mojom::SpeculationAction (although PrefetchWithSubresources is omitted as it + isn't being used by clients). + """ + + PREFETCH = "Prefetch" + PRERENDER = "Prerender" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SpeculationAction: + return cls(json)
+ + + +
+[docs] +class SpeculationTargetHint(enum.Enum): + """ + Corresponds to mojom::SpeculationTargetHint. + See https://github.com/WICG/nav-speculation/blob/main/triggers.md#window-name-targeting-hints + """ + + BLANK = "Blank" + SELF = "Self" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SpeculationTargetHint: + return cls(json)
+ + + +
+[docs] +@dataclass +class PreloadingAttemptKey: + """ + A key that identifies a preloading attempt. + + The url used is the url specified by the trigger (i.e. the initial URL), and + not the final url that is navigated to. For example, prerendering allows + same-origin main frame navigations during the attempt, but the attempt is + still keyed with the initial URL. + """ + + loader_id: network.LoaderId + + action: SpeculationAction + + url: str + + target_hint: typing.Optional[SpeculationTargetHint] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["loaderId"] = self.loader_id.to_json() + json["action"] = self.action.to_json() + json["url"] = self.url + if self.target_hint is not None: + json["targetHint"] = self.target_hint.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PreloadingAttemptKey: + return cls( + loader_id=network.LoaderId.from_json(json["loaderId"]), + action=SpeculationAction.from_json(json["action"]), + url=str(json["url"]), + target_hint=( + SpeculationTargetHint.from_json(json["targetHint"]) + if json.get("targetHint", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class PreloadingAttemptSource: + """ + Lists sources for a preloading attempt, specifically the ids of rule sets + that had a speculation rule that triggered the attempt, and the + BackendNodeIds of <a href> or <area href> elements that triggered the + attempt (in the case of attempts triggered by a document rule). It is + possible for multiple rule sets and links to trigger a single attempt. + """ + + key: PreloadingAttemptKey + + rule_set_ids: typing.List[RuleSetId] + + node_ids: typing.List[dom.BackendNodeId] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["key"] = self.key.to_json() + json["ruleSetIds"] = [i.to_json() for i in self.rule_set_ids] + json["nodeIds"] = [i.to_json() for i in self.node_ids] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PreloadingAttemptSource: + return cls( + key=PreloadingAttemptKey.from_json(json["key"]), + rule_set_ids=[RuleSetId.from_json(i) for i in json["ruleSetIds"]], + node_ids=[dom.BackendNodeId.from_json(i) for i in json["nodeIds"]], + )
+ + + +
+[docs] +class PrerenderFinalStatus(enum.Enum): + """ + List of FinalStatus reasons for Prerender2. + """ + + ACTIVATED = "Activated" + DESTROYED = "Destroyed" + LOW_END_DEVICE = "LowEndDevice" + INVALID_SCHEME_REDIRECT = "InvalidSchemeRedirect" + INVALID_SCHEME_NAVIGATION = "InvalidSchemeNavigation" + NAVIGATION_REQUEST_BLOCKED_BY_CSP = "NavigationRequestBlockedByCsp" + MAIN_FRAME_NAVIGATION = "MainFrameNavigation" + MOJO_BINDER_POLICY = "MojoBinderPolicy" + RENDERER_PROCESS_CRASHED = "RendererProcessCrashed" + RENDERER_PROCESS_KILLED = "RendererProcessKilled" + DOWNLOAD = "Download" + TRIGGER_DESTROYED = "TriggerDestroyed" + NAVIGATION_NOT_COMMITTED = "NavigationNotCommitted" + NAVIGATION_BAD_HTTP_STATUS = "NavigationBadHttpStatus" + CLIENT_CERT_REQUESTED = "ClientCertRequested" + NAVIGATION_REQUEST_NETWORK_ERROR = "NavigationRequestNetworkError" + CANCEL_ALL_HOSTS_FOR_TESTING = "CancelAllHostsForTesting" + DID_FAIL_LOAD = "DidFailLoad" + STOP = "Stop" + SSL_CERTIFICATE_ERROR = "SslCertificateError" + LOGIN_AUTH_REQUESTED = "LoginAuthRequested" + UA_CHANGE_REQUIRES_RELOAD = "UaChangeRequiresReload" + BLOCKED_BY_CLIENT = "BlockedByClient" + AUDIO_OUTPUT_DEVICE_REQUESTED = "AudioOutputDeviceRequested" + MIXED_CONTENT = "MixedContent" + TRIGGER_BACKGROUNDED = "TriggerBackgrounded" + MEMORY_LIMIT_EXCEEDED = "MemoryLimitExceeded" + DATA_SAVER_ENABLED = "DataSaverEnabled" + TRIGGER_URL_HAS_EFFECTIVE_URL = "TriggerUrlHasEffectiveUrl" + ACTIVATED_BEFORE_STARTED = "ActivatedBeforeStarted" + INACTIVE_PAGE_RESTRICTION = "InactivePageRestriction" + START_FAILED = "StartFailed" + TIMEOUT_BACKGROUNDED = "TimeoutBackgrounded" + CROSS_SITE_REDIRECT_IN_INITIAL_NAVIGATION = "CrossSiteRedirectInInitialNavigation" + CROSS_SITE_NAVIGATION_IN_INITIAL_NAVIGATION = ( + "CrossSiteNavigationInInitialNavigation" + ) + SAME_SITE_CROSS_ORIGIN_REDIRECT_NOT_OPT_IN_IN_INITIAL_NAVIGATION = ( + "SameSiteCrossOriginRedirectNotOptInInInitialNavigation" + ) + SAME_SITE_CROSS_ORIGIN_NAVIGATION_NOT_OPT_IN_IN_INITIAL_NAVIGATION = ( + "SameSiteCrossOriginNavigationNotOptInInInitialNavigation" + ) + ACTIVATION_NAVIGATION_PARAMETER_MISMATCH = "ActivationNavigationParameterMismatch" + ACTIVATED_IN_BACKGROUND = "ActivatedInBackground" + EMBEDDER_HOST_DISALLOWED = "EmbedderHostDisallowed" + ACTIVATION_NAVIGATION_DESTROYED_BEFORE_SUCCESS = ( + "ActivationNavigationDestroyedBeforeSuccess" + ) + TAB_CLOSED_BY_USER_GESTURE = "TabClosedByUserGesture" + TAB_CLOSED_WITHOUT_USER_GESTURE = "TabClosedWithoutUserGesture" + PRIMARY_MAIN_FRAME_RENDERER_PROCESS_CRASHED = ( + "PrimaryMainFrameRendererProcessCrashed" + ) + PRIMARY_MAIN_FRAME_RENDERER_PROCESS_KILLED = "PrimaryMainFrameRendererProcessKilled" + ACTIVATION_FRAME_POLICY_NOT_COMPATIBLE = "ActivationFramePolicyNotCompatible" + PRELOADING_DISABLED = "PreloadingDisabled" + BATTERY_SAVER_ENABLED = "BatterySaverEnabled" + ACTIVATED_DURING_MAIN_FRAME_NAVIGATION = "ActivatedDuringMainFrameNavigation" + PRELOADING_UNSUPPORTED_BY_WEB_CONTENTS = "PreloadingUnsupportedByWebContents" + CROSS_SITE_REDIRECT_IN_MAIN_FRAME_NAVIGATION = ( + "CrossSiteRedirectInMainFrameNavigation" + ) + CROSS_SITE_NAVIGATION_IN_MAIN_FRAME_NAVIGATION = ( + "CrossSiteNavigationInMainFrameNavigation" + ) + SAME_SITE_CROSS_ORIGIN_REDIRECT_NOT_OPT_IN_IN_MAIN_FRAME_NAVIGATION = ( + "SameSiteCrossOriginRedirectNotOptInInMainFrameNavigation" + ) + SAME_SITE_CROSS_ORIGIN_NAVIGATION_NOT_OPT_IN_IN_MAIN_FRAME_NAVIGATION = ( + "SameSiteCrossOriginNavigationNotOptInInMainFrameNavigation" + ) + MEMORY_PRESSURE_ON_TRIGGER = 
"MemoryPressureOnTrigger" + MEMORY_PRESSURE_AFTER_TRIGGERED = "MemoryPressureAfterTriggered" + PRERENDERING_DISABLED_BY_DEV_TOOLS = "PrerenderingDisabledByDevTools" + SPECULATION_RULE_REMOVED = "SpeculationRuleRemoved" + ACTIVATED_WITH_AUXILIARY_BROWSING_CONTEXTS = ( + "ActivatedWithAuxiliaryBrowsingContexts" + ) + MAX_NUM_OF_RUNNING_EAGER_PRERENDERS_EXCEEDED = ( + "MaxNumOfRunningEagerPrerendersExceeded" + ) + MAX_NUM_OF_RUNNING_NON_EAGER_PRERENDERS_EXCEEDED = ( + "MaxNumOfRunningNonEagerPrerendersExceeded" + ) + MAX_NUM_OF_RUNNING_EMBEDDER_PRERENDERS_EXCEEDED = ( + "MaxNumOfRunningEmbedderPrerendersExceeded" + ) + PRERENDERING_URL_HAS_EFFECTIVE_URL = "PrerenderingUrlHasEffectiveUrl" + REDIRECTED_PRERENDERING_URL_HAS_EFFECTIVE_URL = ( + "RedirectedPrerenderingUrlHasEffectiveUrl" + ) + ACTIVATION_URL_HAS_EFFECTIVE_URL = "ActivationUrlHasEffectiveUrl" + JAVA_SCRIPT_INTERFACE_ADDED = "JavaScriptInterfaceAdded" + JAVA_SCRIPT_INTERFACE_REMOVED = "JavaScriptInterfaceRemoved" + ALL_PRERENDERING_CANCELED = "AllPrerenderingCanceled" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PrerenderFinalStatus: + return cls(json)
+ + + +
+[docs] +class PreloadingStatus(enum.Enum): + """ + Preloading status values, see also PreloadingTriggeringOutcome. This + status is shared by prefetchStatusUpdated and prerenderStatusUpdated. + """ + + PENDING = "Pending" + RUNNING = "Running" + READY = "Ready" + SUCCESS = "Success" + FAILURE = "Failure" + NOT_SUPPORTED = "NotSupported" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PreloadingStatus: + return cls(json)
+ + + +
+[docs] +class PrefetchStatus(enum.Enum): + """ + TODO(https://crbug.com/1384419): revisit the list of PrefetchStatus and + filter out the ones that aren't necessary to the developers. + """ + + PREFETCH_ALLOWED = "PrefetchAllowed" + PREFETCH_FAILED_INELIGIBLE_REDIRECT = "PrefetchFailedIneligibleRedirect" + PREFETCH_FAILED_INVALID_REDIRECT = "PrefetchFailedInvalidRedirect" + PREFETCH_FAILED_MIME_NOT_SUPPORTED = "PrefetchFailedMIMENotSupported" + PREFETCH_FAILED_NET_ERROR = "PrefetchFailedNetError" + PREFETCH_FAILED_NON2_XX = "PrefetchFailedNon2XX" + PREFETCH_FAILED_PER_PAGE_LIMIT_EXCEEDED = "PrefetchFailedPerPageLimitExceeded" + PREFETCH_EVICTED_AFTER_CANDIDATE_REMOVED = "PrefetchEvictedAfterCandidateRemoved" + PREFETCH_EVICTED_FOR_NEWER_PREFETCH = "PrefetchEvictedForNewerPrefetch" + PREFETCH_HELDBACK = "PrefetchHeldback" + PREFETCH_INELIGIBLE_RETRY_AFTER = "PrefetchIneligibleRetryAfter" + PREFETCH_IS_PRIVACY_DECOY = "PrefetchIsPrivacyDecoy" + PREFETCH_IS_STALE = "PrefetchIsStale" + PREFETCH_NOT_ELIGIBLE_BROWSER_CONTEXT_OFF_THE_RECORD = ( + "PrefetchNotEligibleBrowserContextOffTheRecord" + ) + PREFETCH_NOT_ELIGIBLE_DATA_SAVER_ENABLED = "PrefetchNotEligibleDataSaverEnabled" + PREFETCH_NOT_ELIGIBLE_EXISTING_PROXY = "PrefetchNotEligibleExistingProxy" + PREFETCH_NOT_ELIGIBLE_HOST_IS_NON_UNIQUE = "PrefetchNotEligibleHostIsNonUnique" + PREFETCH_NOT_ELIGIBLE_NON_DEFAULT_STORAGE_PARTITION = ( + "PrefetchNotEligibleNonDefaultStoragePartition" + ) + PREFETCH_NOT_ELIGIBLE_SAME_SITE_CROSS_ORIGIN_PREFETCH_REQUIRED_PROXY = ( + "PrefetchNotEligibleSameSiteCrossOriginPrefetchRequiredProxy" + ) + PREFETCH_NOT_ELIGIBLE_SCHEME_IS_NOT_HTTPS = "PrefetchNotEligibleSchemeIsNotHttps" + PREFETCH_NOT_ELIGIBLE_USER_HAS_COOKIES = "PrefetchNotEligibleUserHasCookies" + PREFETCH_NOT_ELIGIBLE_USER_HAS_SERVICE_WORKER = ( + "PrefetchNotEligibleUserHasServiceWorker" + ) + PREFETCH_NOT_ELIGIBLE_BATTERY_SAVER_ENABLED = ( + "PrefetchNotEligibleBatterySaverEnabled" + ) + PREFETCH_NOT_ELIGIBLE_PRELOADING_DISABLED = "PrefetchNotEligiblePreloadingDisabled" + PREFETCH_NOT_FINISHED_IN_TIME = "PrefetchNotFinishedInTime" + PREFETCH_NOT_STARTED = "PrefetchNotStarted" + PREFETCH_NOT_USED_COOKIES_CHANGED = "PrefetchNotUsedCookiesChanged" + PREFETCH_PROXY_NOT_AVAILABLE = "PrefetchProxyNotAvailable" + PREFETCH_RESPONSE_USED = "PrefetchResponseUsed" + PREFETCH_SUCCESSFUL_BUT_NOT_USED = "PrefetchSuccessfulButNotUsed" + PREFETCH_NOT_USED_PROBE_FAILED = "PrefetchNotUsedProbeFailed" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> PrefetchStatus: + return cls(json)
+ + + +
+[docs] +@dataclass +class PrerenderMismatchedHeaders: + """ + Information of headers to be displayed when the header mismatch occurred. + """ + + header_name: str + + initial_value: typing.Optional[str] = None + + activation_value: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["headerName"] = self.header_name + if self.initial_value is not None: + json["initialValue"] = self.initial_value + if self.activation_value is not None: + json["activationValue"] = self.activation_value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PrerenderMismatchedHeaders: + return cls( + header_name=str(json["headerName"]), + initial_value=( + str(json["initialValue"]) + if json.get("initialValue", None) is not None + else None + ), + activation_value=( + str(json["activationValue"]) + if json.get("activationValue", None) is not None + else None + ), + )
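The optional-field handling above is symmetric between to_json() and from_json(). A minimal grounded sketch of the round trip, assuming only that this module is importable as nodriver.cdp.preload (the key names come from the methods above; the header values are made up for illustration):

from nodriver.cdp import preload

# Key names follow PrerenderMismatchedHeaders.from_json(); values are illustrative.
payload = {"headerName": "User-Agent", "initialValue": "UA/1.0"}  # activationValue omitted

hdr = preload.PrerenderMismatchedHeaders.from_json(payload)
print(hdr.header_name, hdr.initial_value, hdr.activation_value)  # User-Agent UA/1.0 None
assert hdr.to_json() == payload  # None-valued optional fields are not serialized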
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "Preload.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "Preload.disable", + } + json = yield cmd_dict
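enable() and disable() above, like every command in these generated modules, are generator functions that yield a single request dict and receive the CDP response back through the generator. A hedged usage sketch follows; uc.start(), browser.get(), tab.send() and uc.loop() are assumptions about nodriver's high-level API, which is not defined in this file:

import nodriver as uc
from nodriver import cdp

async def main():
    browser = await uc.start()
    tab = await browser.get("https://example.com")
    # tab.send() drives the generator: it transmits the yielded request dict
    # ({"method": "Preload.enable"}) and feeds the CDP response back into it.
    await tab.send(cdp.preload.enable())
    await tab.send(cdp.preload.disable())

if __name__ == "__main__":
    uc.loop().run_until_complete(main())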
+ + + +
+[docs] +@event_class("Preload.ruleSetUpdated") +@dataclass +class RuleSetUpdated: + """ + Upsert. Currently, it is only emitted when a rule set added. + """ + + rule_set: RuleSet + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RuleSetUpdated: + return cls(rule_set=RuleSet.from_json(json["ruleSet"]))
+ + + +
+[docs] +@event_class("Preload.ruleSetRemoved") +@dataclass +class RuleSetRemoved: + id_: RuleSetId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RuleSetRemoved: + return cls(id_=RuleSetId.from_json(json["id"]))
+ + + +
+[docs] +@event_class("Preload.preloadEnabledStateUpdated") +@dataclass +class PreloadEnabledStateUpdated: + """ + Fired when a preload enabled state is updated. + """ + + disabled_by_preference: bool + disabled_by_data_saver: bool + disabled_by_battery_saver: bool + disabled_by_holdback_prefetch_speculation_rules: bool + disabled_by_holdback_prerender_speculation_rules: bool + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PreloadEnabledStateUpdated: + return cls( + disabled_by_preference=bool(json["disabledByPreference"]), + disabled_by_data_saver=bool(json["disabledByDataSaver"]), + disabled_by_battery_saver=bool(json["disabledByBatterySaver"]), + disabled_by_holdback_prefetch_speculation_rules=bool( + json["disabledByHoldbackPrefetchSpeculationRules"] + ), + disabled_by_holdback_prerender_speculation_rules=bool( + json["disabledByHoldbackPrerenderSpeculationRules"] + ), + )
+ + + +
+[docs] +@event_class("Preload.prefetchStatusUpdated") +@dataclass +class PrefetchStatusUpdated: + """ + Fired when a prefetch attempt is updated. + """ + + key: PreloadingAttemptKey + #: The frame id of the frame initiating prefetch. + initiating_frame_id: page.FrameId + prefetch_url: str + status: PreloadingStatus + prefetch_status: PrefetchStatus + request_id: network.RequestId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PrefetchStatusUpdated: + return cls( + key=PreloadingAttemptKey.from_json(json["key"]), + initiating_frame_id=page.FrameId.from_json(json["initiatingFrameId"]), + prefetch_url=str(json["prefetchUrl"]), + status=PreloadingStatus.from_json(json["status"]), + prefetch_status=PrefetchStatus.from_json(json["prefetchStatus"]), + request_id=network.RequestId.from_json(json["requestId"]), + )
+ + + +
+[docs] +@event_class("Preload.prerenderStatusUpdated") +@dataclass +class PrerenderStatusUpdated: + """ + Fired when a prerender attempt is updated. + """ + + key: PreloadingAttemptKey + status: PreloadingStatus + prerender_status: typing.Optional[PrerenderFinalStatus] + #: This is used to give users more information about the name of Mojo interface + #: that is incompatible with prerender and has caused the cancellation of the attempt. + disallowed_mojo_interface: typing.Optional[str] + mismatched_headers: typing.Optional[typing.List[PrerenderMismatchedHeaders]] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PrerenderStatusUpdated: + return cls( + key=PreloadingAttemptKey.from_json(json["key"]), + status=PreloadingStatus.from_json(json["status"]), + prerender_status=( + PrerenderFinalStatus.from_json(json["prerenderStatus"]) + if json.get("prerenderStatus", None) is not None + else None + ), + disallowed_mojo_interface=( + str(json["disallowedMojoInterface"]) + if json.get("disallowedMojoInterface", None) is not None + else None + ), + mismatched_headers=( + [ + PrerenderMismatchedHeaders.from_json(i) + for i in json["mismatchedHeaders"] + ] + if json.get("mismatchedHeaders", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Preload.preloadingAttemptSourcesUpdated") +@dataclass +class PreloadingAttemptSourcesUpdated: + """ + Send a list of sources for all preloading attempts in a document. + """ + + loader_id: network.LoaderId + preloading_attempt_sources: typing.List[PreloadingAttemptSource] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PreloadingAttemptSourcesUpdated: + return cls( + loader_id=network.LoaderId.from_json(json["loaderId"]), + preloading_attempt_sources=[ + PreloadingAttemptSource.from_json(i) + for i in json["preloadingAttemptSources"] + ], + )
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/profiler.html b/docs/_build/html/_modules/nodriver/cdp/profiler.html
new file mode 100644
index 0000000..9ba4c27
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/profiler.html
@@ -0,0 +1,814 @@
+ nodriver.cdp.profiler - nodriver documentation

Source code for nodriver.cdp.profiler

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Profiler
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import debugger
+from . import runtime
+
+
+
+[docs] +@dataclass +class ProfileNode: + """ + Profile node. Holds callsite information, execution statistics and child nodes. + """ + + #: Unique id of the node. + id_: int + + #: Function location. + call_frame: runtime.CallFrame + + #: Number of samples where this node was on top of the call stack. + hit_count: typing.Optional[int] = None + + #: Child node ids. + children: typing.Optional[typing.List[int]] = None + + #: The reason of being not optimized. The function may be deoptimized or marked as don't + #: optimize. + deopt_reason: typing.Optional[str] = None + + #: An array of source position ticks. + position_ticks: typing.Optional[typing.List[PositionTickInfo]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_ + json["callFrame"] = self.call_frame.to_json() + if self.hit_count is not None: + json["hitCount"] = self.hit_count + if self.children is not None: + json["children"] = [i for i in self.children] + if self.deopt_reason is not None: + json["deoptReason"] = self.deopt_reason + if self.position_ticks is not None: + json["positionTicks"] = [i.to_json() for i in self.position_ticks] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ProfileNode: + return cls( + id_=int(json["id"]), + call_frame=runtime.CallFrame.from_json(json["callFrame"]), + hit_count=( + int(json["hitCount"]) + if json.get("hitCount", None) is not None + else None + ), + children=( + [int(i) for i in json["children"]] + if json.get("children", None) is not None + else None + ), + deopt_reason=( + str(json["deoptReason"]) + if json.get("deoptReason", None) is not None + else None + ), + position_ticks=( + [PositionTickInfo.from_json(i) for i in json["positionTicks"]] + if json.get("positionTicks", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class Profile: + """ + Profile. + """ + + #: The list of profile nodes. First item is the root node. + nodes: typing.List[ProfileNode] + + #: Profiling start timestamp in microseconds. + start_time: float + + #: Profiling end timestamp in microseconds. + end_time: float + + #: Ids of samples top nodes. + samples: typing.Optional[typing.List[int]] = None + + #: Time intervals between adjacent samples in microseconds. The first delta is relative to the + #: profile startTime. + time_deltas: typing.Optional[typing.List[int]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["nodes"] = [i.to_json() for i in self.nodes] + json["startTime"] = self.start_time + json["endTime"] = self.end_time + if self.samples is not None: + json["samples"] = [i for i in self.samples] + if self.time_deltas is not None: + json["timeDeltas"] = [i for i in self.time_deltas] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Profile: + return cls( + nodes=[ProfileNode.from_json(i) for i in json["nodes"]], + start_time=float(json["startTime"]), + end_time=float(json["endTime"]), + samples=( + [int(i) for i in json["samples"]] + if json.get("samples", None) is not None + else None + ), + time_deltas=( + [int(i) for i in json["timeDeltas"]] + if json.get("timeDeltas", None) is not None + else None + ), + )
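Profile stores its call tree flat: nodes is a plain list and each ProfileNode.children holds node ids rather than objects. A grounded sketch of resolving that structure, using only the dataclasses above (the import path nodriver.cdp.profiler is assumed):

import typing
from nodriver.cdp import profiler

def total_hits(profile: profiler.Profile) -> int:
    # hit_count is optional per node, so treat a missing value as zero
    return sum(node.hit_count or 0 for node in profile.nodes)

def children_of(profile: profiler.Profile, node_id: int) -> typing.List[profiler.ProfileNode]:
    # children holds node ids; resolve them back to ProfileNode objects
    by_id = {node.id_: node for node in profile.nodes}
    return [by_id[child_id] for child_id in (by_id[node_id].children or [])]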
+ + + +
+[docs] +@dataclass +class PositionTickInfo: + """ + Specifies a number of samples attributed to a certain source position. + """ + + #: Source line number (1-based). + line: int + + #: Number of samples attributed to the source line. + ticks: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["line"] = self.line + json["ticks"] = self.ticks + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PositionTickInfo: + return cls( + line=int(json["line"]), + ticks=int(json["ticks"]), + )
+ + + +
+[docs] +@dataclass +class CoverageRange: + """ + Coverage data for a source range. + """ + + #: JavaScript script source offset for the range start. + start_offset: int + + #: JavaScript script source offset for the range end. + end_offset: int + + #: Collected execution count of the source range. + count: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["startOffset"] = self.start_offset + json["endOffset"] = self.end_offset + json["count"] = self.count + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CoverageRange: + return cls( + start_offset=int(json["startOffset"]), + end_offset=int(json["endOffset"]), + count=int(json["count"]), + )
+ + + +
+[docs] +@dataclass +class FunctionCoverage: + """ + Coverage data for a JavaScript function. + """ + + #: JavaScript function name. + function_name: str + + #: Source ranges inside the function with coverage data. + ranges: typing.List[CoverageRange] + + #: Whether coverage data for this function has block granularity. + is_block_coverage: bool + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["functionName"] = self.function_name + json["ranges"] = [i.to_json() for i in self.ranges] + json["isBlockCoverage"] = self.is_block_coverage + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FunctionCoverage: + return cls( + function_name=str(json["functionName"]), + ranges=[CoverageRange.from_json(i) for i in json["ranges"]], + is_block_coverage=bool(json["isBlockCoverage"]), + )
+ + + +
+[docs] +@dataclass +class ScriptCoverage: + """ + Coverage data for a JavaScript script. + """ + + #: JavaScript script id. + script_id: runtime.ScriptId + + #: JavaScript script name or url. + url: str + + #: Functions contained in the script that has coverage data. + functions: typing.List[FunctionCoverage] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["scriptId"] = self.script_id.to_json() + json["url"] = self.url + json["functions"] = [i.to_json() for i in self.functions] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ScriptCoverage: + return cls( + script_id=runtime.ScriptId.from_json(json["scriptId"]), + url=str(json["url"]), + functions=[FunctionCoverage.from_json(i) for i in json["functions"]], + )
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "Profiler.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "Profiler.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_best_effort_coverage() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[ScriptCoverage]] +): + """ + Collect coverage data for the current isolate. The coverage data may be incomplete due to + garbage collection. + + :returns: Coverage data for the current isolate. + """ + cmd_dict: T_JSON_DICT = { + "method": "Profiler.getBestEffortCoverage", + } + json = yield cmd_dict + return [ScriptCoverage.from_json(i) for i in json["result"]]
+ + + +
+[docs] +def set_sampling_interval( + interval: int, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Changes CPU profiler sampling interval. Must be called before CPU profiles recording started. + + :param interval: New sampling interval in microseconds. + """ + params: T_JSON_DICT = dict() + params["interval"] = interval + cmd_dict: T_JSON_DICT = { + "method": "Profiler.setSamplingInterval", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def start() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "Profiler.start", + } + json = yield cmd_dict
+ + + +
+[docs] +def start_precise_coverage( + call_count: typing.Optional[bool] = None, + detailed: typing.Optional[bool] = None, + allow_triggered_updates: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, float]: + """ + Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code + coverage may be incomplete. Enabling prevents running optimized code and resets execution + counters. + + :param call_count: *(Optional)* Collect accurate call counts beyond simple 'covered' or 'not covered'. + :param detailed: *(Optional)* Collect block-based coverage. + :param allow_triggered_updates: *(Optional)* Allow the backend to send updates on its own initiative + :returns: Monotonically increasing time (in seconds) when the coverage update was taken in the backend. + """ + params: T_JSON_DICT = dict() + if call_count is not None: + params["callCount"] = call_count + if detailed is not None: + params["detailed"] = detailed + if allow_triggered_updates is not None: + params["allowTriggeredUpdates"] = allow_triggered_updates + cmd_dict: T_JSON_DICT = { + "method": "Profiler.startPreciseCoverage", + "params": params, + } + json = yield cmd_dict + return float(json["timestamp"])
+ + + +
+[docs] +def stop() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, Profile]: + """ + + + :returns: Recorded profile. + """ + cmd_dict: T_JSON_DICT = { + "method": "Profiler.stop", + } + json = yield cmd_dict + return Profile.from_json(json["profile"])
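A typical sampling-profiler round trip is enable -> start -> (let code run) -> stop, with stop() parsing the returned Profile. Hedged sketch; uc.start(), browser.get(), tab.send() and uc.loop() are assumptions about nodriver's API, not defined in this file:

import asyncio
import nodriver as uc
from nodriver import cdp

async def profile_page(url: str) -> cdp.profiler.Profile:
    browser = await uc.start()
    tab = await browser.get(url)
    await tab.send(cdp.profiler.enable())
    await tab.send(cdp.profiler.start())
    await asyncio.sleep(2)  # let page scripts run while the sampler is active
    profile = await tab.send(cdp.profiler.stop())  # Profile, parsed by stop() above
    await tab.send(cdp.profiler.disable())
    return profile

profile = uc.loop().run_until_complete(profile_page("https://example.com"))
print(len(profile.nodes), "profile nodes collected")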
+ + + +
+[docs] +def stop_precise_coverage() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disable precise code coverage. Disabling releases unnecessary execution count records and allows + executing optimized code. + """ + cmd_dict: T_JSON_DICT = { + "method": "Profiler.stopPreciseCoverage", + } + json = yield cmd_dict
+ + + +
+[docs] +def take_precise_coverage() -> ( + typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[typing.List[ScriptCoverage], float] + ] +): + """ + Collect coverage data for the current isolate, and resets execution counters. Precise code + coverage needs to have started. + + :returns: A tuple with the following items: + + 0. **result** - Coverage data for the current isolate. + 1. **timestamp** - Monotonically increasing time (in seconds) when the coverage update was taken in the backend. + """ + cmd_dict: T_JSON_DICT = { + "method": "Profiler.takePreciseCoverage", + } + json = yield cmd_dict + return ( + [ScriptCoverage.from_json(i) for i in json["result"]], + float(json["timestamp"]), + )
+ + + +
+[docs] +@event_class("Profiler.consoleProfileFinished") +@dataclass +class ConsoleProfileFinished: + id_: str + #: Location of console.profileEnd(). + location: debugger.Location + profile: Profile + #: Profile title passed as an argument to console.profile(). + title: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ConsoleProfileFinished: + return cls( + id_=str(json["id"]), + location=debugger.Location.from_json(json["location"]), + profile=Profile.from_json(json["profile"]), + title=str(json["title"]) if json.get("title", None) is not None else None, + )
+ + + +
+[docs] +@event_class("Profiler.consoleProfileStarted") +@dataclass +class ConsoleProfileStarted: + """ + Sent when new profile recording is started using console.profile() call. + """ + + id_: str + #: Location of console.profile(). + location: debugger.Location + #: Profile title passed as an argument to console.profile(). + title: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ConsoleProfileStarted: + return cls( + id_=str(json["id"]), + location=debugger.Location.from_json(json["location"]), + title=str(json["title"]) if json.get("title", None) is not None else None, + )
+ + + +
+[docs] +@event_class("Profiler.preciseCoverageDeltaUpdate") +@dataclass +class PreciseCoverageDeltaUpdate: + """ + **EXPERIMENTAL** + + Reports coverage delta since the last poll (either from an event like this, or from + ``takePreciseCoverage`` for the current isolate. May only be sent if precise code + coverage has been started. This event can be trigged by the embedder to, for example, + trigger collection of coverage data immediately at a certain point in time. + """ + + #: Monotonically increasing time (in seconds) when the coverage update was taken in the backend. + timestamp: float + #: Identifier for distinguishing coverage events. + occasion: str + #: Coverage data for the current isolate. + result: typing.List[ScriptCoverage] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PreciseCoverageDeltaUpdate: + return cls( + timestamp=float(json["timestamp"]), + occasion=str(json["occasion"]), + result=[ScriptCoverage.from_json(i) for i in json["result"]], + )
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/pwa.html b/docs/_build/html/_modules/nodriver/cdp/pwa.html
new file mode 100644
index 0000000..f0b186d
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/pwa.html
@@ -0,0 +1,593 @@
+ nodriver.cdp.pwa - nodriver documentation

Source code for nodriver.cdp.pwa

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: PWA (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import target
+
+
+
+[docs] +@dataclass +class FileHandlerAccept: + """ + The following types are the replica of + https://crsrc.org/c/chrome/browser/web_applications/proto/web_app_os_integration_state.proto;drc=9910d3be894c8f142c977ba1023f30a656bc13fc;l=67 + """ + + #: New name of the mimetype according to + #: https://www.iana.org/assignments/media-types/media-types.xhtml + media_type: str + + file_extensions: typing.List[str] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["mediaType"] = self.media_type + json["fileExtensions"] = [i for i in self.file_extensions] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FileHandlerAccept: + return cls( + media_type=str(json["mediaType"]), + file_extensions=[str(i) for i in json["fileExtensions"]], + )
+ + + +
+[docs] +@dataclass +class FileHandler: + action: str + + accepts: typing.List[FileHandlerAccept] + + display_name: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["action"] = self.action + json["accepts"] = [i.to_json() for i in self.accepts] + json["displayName"] = self.display_name + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FileHandler: + return cls( + action=str(json["action"]), + accepts=[FileHandlerAccept.from_json(i) for i in json["accepts"]], + display_name=str(json["displayName"]), + )
+ + + +
+[docs] +class DisplayMode(enum.Enum): + """ + If user prefers opening the app in browser or an app window. + """ + + STANDALONE = "standalone" + BROWSER = "browser" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> DisplayMode: + return cls(json)
+ + + +
+[docs] +def get_os_app_state( + manifest_id: str, +) -> typing.Generator[ + T_JSON_DICT, T_JSON_DICT, typing.Tuple[int, typing.List[FileHandler]] +]: + """ + Returns the following OS state for the given manifest id. + + :param manifest_id: The id from the webapp's manifest file, commonly it's the url of the site installing the webapp. See https://web.dev/learn/pwa/web-app-manifest. + :returns: A tuple with the following items: + + 0. **badgeCount** - + 1. **fileHandlers** - + """ + params: T_JSON_DICT = dict() + params["manifestId"] = manifest_id + cmd_dict: T_JSON_DICT = { + "method": "PWA.getOsAppState", + "params": params, + } + json = yield cmd_dict + return ( + int(json["badgeCount"]), + [FileHandler.from_json(i) for i in json["fileHandlers"]], + )
+ + + +
+[docs] +def install( + manifest_id: str, install_url_or_bundle_url: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Installs the given manifest identity, optionally using the given install_url + or IWA bundle location. + + TODO(crbug.com/337872319) Support IWA to meet the following specific + requirement. + IWA-specific install description: If the manifest_id is isolated-app://, + install_url_or_bundle_url is required, and can be either an http(s) URL or + file:// URL pointing to a signed web bundle (.swbn). The .swbn file's + signing key must correspond to manifest_id. If Chrome is not in IWA dev + mode, the installation will fail, regardless of the state of the allowlist. + + :param manifest_id: + :param install_url_or_bundle_url: *(Optional)* The location of the app or bundle overriding the one derived from the manifestId. + """ + params: T_JSON_DICT = dict() + params["manifestId"] = manifest_id + if install_url_or_bundle_url is not None: + params["installUrlOrBundleUrl"] = install_url_or_bundle_url + cmd_dict: T_JSON_DICT = { + "method": "PWA.install", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def uninstall(manifest_id: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Uninstalls the given manifest_id and closes any opened app windows. + + :param manifest_id: + """ + params: T_JSON_DICT = dict() + params["manifestId"] = manifest_id + cmd_dict: T_JSON_DICT = { + "method": "PWA.uninstall", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def launch( + manifest_id: str, url: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, target.TargetID]: + """ + Launches the installed web app, or an url in the same web app instead of the + default start url if it is provided. Returns a page Target.TargetID which + can be used to attach to via Target.attachToTarget or similar APIs. + + :param manifest_id: + :param url: *(Optional)* + :returns: ID of the tab target created as a result. + """ + params: T_JSON_DICT = dict() + params["manifestId"] = manifest_id + if url is not None: + params["url"] = url + cmd_dict: T_JSON_DICT = { + "method": "PWA.launch", + "params": params, + } + json = yield cmd_dict + return target.TargetID.from_json(json["targetId"])
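install() and launch() chain naturally: install a manifest id, then launch it and get back the Target.TargetID of the app window. A hedged sketch; tab.send() is an assumed nodriver dispatch helper, the manifest id is a made-up example, and the experimental PWA domain may require being sent on an appropriate target:

import nodriver as uc
from nodriver import cdp

async def install_and_launch(manifest_id: str) -> None:
    browser = await uc.start()
    tab = await browser.get("about:blank")
    await tab.send(cdp.pwa.install(manifest_id))
    target_id = await tab.send(cdp.pwa.launch(manifest_id))  # Target.TargetID of the app window
    print("launched app target:", target_id)

uc.loop().run_until_complete(install_and_launch("https://example.com/"))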
+ + + +
+[docs] +def launch_files_in_app( + manifest_id: str, files: typing.List[str] +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[target.TargetID]]: + """ + Opens one or more local files from an installed web app identified by its + manifestId. The web app needs to have file handlers registered to process + the files. The API returns one or more page Target.TargetIDs which can be + used to attach to via Target.attachToTarget or similar APIs. + If some files in the parameters cannot be handled by the web app, they will + be ignored. If none of the files can be handled, this API returns an error. + If no files are provided as the parameter, this API also returns an error. + + According to the definition of the file handlers in the manifest file, one + Target.TargetID may represent a page handling one or more files. The order + of the returned Target.TargetIDs is not guaranteed. + + TODO(crbug.com/339454034): Check the existences of the input files. + + :param manifest_id: + :param files: + :returns: IDs of the tab targets created as the result. + """ + params: T_JSON_DICT = dict() + params["manifestId"] = manifest_id + params["files"] = [i for i in files] + cmd_dict: T_JSON_DICT = { + "method": "PWA.launchFilesInApp", + "params": params, + } + json = yield cmd_dict + return [target.TargetID.from_json(i) for i in json["targetIds"]]
+ + + +
+[docs] +def open_current_page_in_app( + manifest_id: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Opens the current page in its web app identified by the manifest id, needs + to be called on a page target. This function returns immediately without + waiting for the app to finish loading. + + :param manifest_id: + """ + params: T_JSON_DICT = dict() + params["manifestId"] = manifest_id + cmd_dict: T_JSON_DICT = { + "method": "PWA.openCurrentPageInApp", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def change_app_user_settings( + manifest_id: str, + link_capturing: typing.Optional[bool] = None, + display_mode: typing.Optional[DisplayMode] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Changes user settings of the web app identified by its manifestId. If the + app was not installed, this command returns an error. Unset parameters will + be ignored; unrecognized values will cause an error. + + Unlike the ones defined in the manifest files of the web apps, these + settings are provided by the browser and controlled by the users; they + impact the way the browser handles the web apps. + + See the comment of each parameter. + + :param manifest_id: + :param link_capturing: *(Optional)* Whether the user allows the links clicked on by the user in the app's scope (or extended scope, if the manifest has scope extensions and the flags ``DesktopPWAsLinkCapturingWithScopeExtensions`` and ``WebAppEnableScopeExtensions`` are enabled). Note: the API does not support resetting linkCapturing to its initial value; uninstalling and installing the web app again will reset it. TODO(crbug.com/339453269): Setting this value on ChromeOS is not supported yet. + :param display_mode: *(Optional)* + """ + params: T_JSON_DICT = dict() + params["manifestId"] = manifest_id + if link_capturing is not None: + params["linkCapturing"] = link_capturing + if display_mode is not None: + params["displayMode"] = display_mode.to_json() + cmd_dict: T_JSON_DICT = { + "method": "PWA.changeAppUserSettings", + "params": params, + } + json = yield cmd_dict
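A short hedged sketch of changing one user setting, using the DisplayMode enum defined above; tab is assumed to be a nodriver Tab exposing a send() coroutine:

from nodriver import cdp

async def prefer_standalone(tab, manifest_id: str) -> None:
    # open the installed app in its own window rather than a browser tab
    await tab.send(
        cdp.pwa.change_app_user_settings(
            manifest_id=manifest_id,
            display_mode=cdp.pwa.DisplayMode.STANDALONE,
        )
    )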
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/runtime.html b/docs/_build/html/_modules/nodriver/cdp/runtime.html
new file mode 100644
index 0000000..3306c45
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/runtime.html
@@ -0,0 +1,2308 @@
+ nodriver.cdp.runtime - nodriver documentation

Source code for nodriver.cdp.runtime

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Runtime
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +class ScriptId(str): + """ + Unique script identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> ScriptId: + return cls(json) + + def __repr__(self): + return "ScriptId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class SerializationOptions: + """ + Represents options for serialization. Overrides ``generatePreview`` and ``returnByValue``. + """ + + serialization: str + + #: Deep serialization depth. Default is full depth. Respected only in ``deep`` serialization mode. + max_depth: typing.Optional[int] = None + + #: Embedder-specific parameters. For example if connected to V8 in Chrome these control DOM + #: serialization via ``maxNodeDepth: integer`` and ``includeShadowTree: "none" `` "open" `` "all"``. + #: Values can be only of type string or integer. + additional_parameters: typing.Optional[dict] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["serialization"] = self.serialization + if self.max_depth is not None: + json["maxDepth"] = self.max_depth + if self.additional_parameters is not None: + json["additionalParameters"] = self.additional_parameters + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SerializationOptions: + return cls( + serialization=str(json["serialization"]), + max_depth=( + int(json["maxDepth"]) + if json.get("maxDepth", None) is not None + else None + ), + additional_parameters=( + dict(json["additionalParameters"]) + if json.get("additionalParameters", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class DeepSerializedValue: + """ + Represents deep serialized value. + """ + + type_: str + + value: typing.Optional[typing.Any] = None + + object_id: typing.Optional[str] = None + + #: Set if value reference met more then once during serialization. In such + #: case, value is provided only to one of the serialized values. Unique + #: per value in the scope of one CDP call. + weak_local_object_reference: typing.Optional[int] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + if self.value is not None: + json["value"] = self.value + if self.object_id is not None: + json["objectId"] = self.object_id + if self.weak_local_object_reference is not None: + json["weakLocalObjectReference"] = self.weak_local_object_reference + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DeepSerializedValue: + return cls( + type_=str(json["type"]), + value=json["value"] if json.get("value", None) is not None else None, + object_id=( + str(json["objectId"]) + if json.get("objectId", None) is not None + else None + ), + weak_local_object_reference=( + int(json["weakLocalObjectReference"]) + if json.get("weakLocalObjectReference", None) is not None + else None + ), + )
+ + + +
+[docs] +class RemoteObjectId(str): + """ + Unique object identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> RemoteObjectId: + return cls(json) + + def __repr__(self): + return "RemoteObjectId({})".format(super().__repr__())
+ + + +
+[docs] +class UnserializableValue(str): + """ + Primitive value which cannot be JSON-stringified. Includes values ``-0``, ``NaN``, ``Infinity``, + ``-Infinity``, and bigint literals. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> UnserializableValue: + return cls(json) + + def __repr__(self): + return "UnserializableValue({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class RemoteObject: + """ + Mirror object referencing original JavaScript object. + """ + + #: Object type. + type_: str + + #: Object subtype hint. Specified for ``object`` type values only. + #: NOTE: If you change anything here, make sure to also update + #: ``subtype`` in ``ObjectPreview`` and ``PropertyPreview`` below. + subtype: typing.Optional[str] = None + + #: Object class (constructor) name. Specified for ``object`` type values only. + class_name: typing.Optional[str] = None + + #: Remote object value in case of primitive values or JSON values (if it was requested). + value: typing.Optional[typing.Any] = None + + #: Primitive value which can not be JSON-stringified does not have ``value``, but gets this + #: property. + unserializable_value: typing.Optional[UnserializableValue] = None + + #: String representation of the object. + description: typing.Optional[str] = None + + #: Deep serialized value. + deep_serialized_value: typing.Optional[DeepSerializedValue] = None + + #: Unique object identifier (for non-primitive values). + object_id: typing.Optional[RemoteObjectId] = None + + #: Preview containing abbreviated property values. Specified for ``object`` type values only. + preview: typing.Optional[ObjectPreview] = None + + custom_preview: typing.Optional[CustomPreview] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + if self.subtype is not None: + json["subtype"] = self.subtype + if self.class_name is not None: + json["className"] = self.class_name + if self.value is not None: + json["value"] = self.value + if self.unserializable_value is not None: + json["unserializableValue"] = self.unserializable_value.to_json() + if self.description is not None: + json["description"] = self.description + if self.deep_serialized_value is not None: + json["deepSerializedValue"] = self.deep_serialized_value.to_json() + if self.object_id is not None: + json["objectId"] = self.object_id.to_json() + if self.preview is not None: + json["preview"] = self.preview.to_json() + if self.custom_preview is not None: + json["customPreview"] = self.custom_preview.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RemoteObject: + return cls( + type_=str(json["type"]), + subtype=( + str(json["subtype"]) if json.get("subtype", None) is not None else None + ), + class_name=( + str(json["className"]) + if json.get("className", None) is not None + else None + ), + value=json["value"] if json.get("value", None) is not None else None, + unserializable_value=( + UnserializableValue.from_json(json["unserializableValue"]) + if json.get("unserializableValue", None) is not None + else None + ), + description=( + str(json["description"]) + if json.get("description", None) is not None + else None + ), + deep_serialized_value=( + DeepSerializedValue.from_json(json["deepSerializedValue"]) + if json.get("deepSerializedValue", None) is not None + else None + ), + object_id=( + RemoteObjectId.from_json(json["objectId"]) + if json.get("objectId", None) is not None + else None + ), + preview=( + ObjectPreview.from_json(json["preview"]) + if json.get("preview", None) is not None + else None + ), + custom_preview=( + CustomPreview.from_json(json["customPreview"]) + if json.get("customPreview", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CustomPreview: + #: The JSON-stringified result of formatter.header(object, config) call. + #: It contains json ML array that represents RemoteObject. + header: str + + #: If formatter returns true as a result of formatter.hasBody call then bodyGetterId will + #: contain RemoteObjectId for the function that returns result of formatter.body(object, config) call. + #: The result value is json ML array. + body_getter_id: typing.Optional[RemoteObjectId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["header"] = self.header + if self.body_getter_id is not None: + json["bodyGetterId"] = self.body_getter_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CustomPreview: + return cls( + header=str(json["header"]), + body_getter_id=( + RemoteObjectId.from_json(json["bodyGetterId"]) + if json.get("bodyGetterId", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ObjectPreview: + """ + Object containing abbreviated remote object value. + """ + + #: Object type. + type_: str + + #: True iff some of the properties or entries of the original object did not fit. + overflow: bool + + #: List of the properties. + properties: typing.List[PropertyPreview] + + #: Object subtype hint. Specified for ``object`` type values only. + subtype: typing.Optional[str] = None + + #: String representation of the object. + description: typing.Optional[str] = None + + #: List of the entries. Specified for ``map`` and ``set`` subtype values only. + entries: typing.Optional[typing.List[EntryPreview]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + json["overflow"] = self.overflow + json["properties"] = [i.to_json() for i in self.properties] + if self.subtype is not None: + json["subtype"] = self.subtype + if self.description is not None: + json["description"] = self.description + if self.entries is not None: + json["entries"] = [i.to_json() for i in self.entries] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ObjectPreview: + return cls( + type_=str(json["type"]), + overflow=bool(json["overflow"]), + properties=[PropertyPreview.from_json(i) for i in json["properties"]], + subtype=( + str(json["subtype"]) if json.get("subtype", None) is not None else None + ), + description=( + str(json["description"]) + if json.get("description", None) is not None + else None + ), + entries=( + [EntryPreview.from_json(i) for i in json["entries"]] + if json.get("entries", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class PropertyPreview: + #: Property name. + name: str + + #: Object type. Accessor means that the property itself is an accessor property. + type_: str + + #: User-friendly property value string. + value: typing.Optional[str] = None + + #: Nested value preview. + value_preview: typing.Optional[ObjectPreview] = None + + #: Object subtype hint. Specified for ``object`` type values only. + subtype: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["type"] = self.type_ + if self.value is not None: + json["value"] = self.value + if self.value_preview is not None: + json["valuePreview"] = self.value_preview.to_json() + if self.subtype is not None: + json["subtype"] = self.subtype + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PropertyPreview: + return cls( + name=str(json["name"]), + type_=str(json["type"]), + value=str(json["value"]) if json.get("value", None) is not None else None, + value_preview=( + ObjectPreview.from_json(json["valuePreview"]) + if json.get("valuePreview", None) is not None + else None + ), + subtype=( + str(json["subtype"]) if json.get("subtype", None) is not None else None + ), + )
+ + + +
+[docs] +@dataclass +class EntryPreview: + #: Preview of the value. + value: ObjectPreview + + #: Preview of the key. Specified for map-like collection entries. + key: typing.Optional[ObjectPreview] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["value"] = self.value.to_json() + if self.key is not None: + json["key"] = self.key.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> EntryPreview: + return cls( + value=ObjectPreview.from_json(json["value"]), + key=( + ObjectPreview.from_json(json["key"]) + if json.get("key", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class PropertyDescriptor: + """ + Object property descriptor. + """ + + #: Property name or symbol description. + name: str + + #: True if the type of this property descriptor may be changed and if the property may be + #: deleted from the corresponding object. + configurable: bool + + #: True if this property shows up during enumeration of the properties on the corresponding + #: object. + enumerable: bool + + #: The value associated with the property. + value: typing.Optional[RemoteObject] = None + + #: True if the value associated with the property may be changed (data descriptors only). + writable: typing.Optional[bool] = None + + #: A function which serves as a getter for the property, or ``undefined`` if there is no getter + #: (accessor descriptors only). + get: typing.Optional[RemoteObject] = None + + #: A function which serves as a setter for the property, or ``undefined`` if there is no setter + #: (accessor descriptors only). + set_: typing.Optional[RemoteObject] = None + + #: True if the result was thrown during the evaluation. + was_thrown: typing.Optional[bool] = None + + #: True if the property is owned for the object. + is_own: typing.Optional[bool] = None + + #: Property symbol object, if the property is of the ``symbol`` type. + symbol: typing.Optional[RemoteObject] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["configurable"] = self.configurable + json["enumerable"] = self.enumerable + if self.value is not None: + json["value"] = self.value.to_json() + if self.writable is not None: + json["writable"] = self.writable + if self.get is not None: + json["get"] = self.get.to_json() + if self.set_ is not None: + json["set"] = self.set_.to_json() + if self.was_thrown is not None: + json["wasThrown"] = self.was_thrown + if self.is_own is not None: + json["isOwn"] = self.is_own + if self.symbol is not None: + json["symbol"] = self.symbol.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PropertyDescriptor: + return cls( + name=str(json["name"]), + configurable=bool(json["configurable"]), + enumerable=bool(json["enumerable"]), + value=( + RemoteObject.from_json(json["value"]) + if json.get("value", None) is not None + else None + ), + writable=( + bool(json["writable"]) + if json.get("writable", None) is not None + else None + ), + get=( + RemoteObject.from_json(json["get"]) + if json.get("get", None) is not None + else None + ), + set_=( + RemoteObject.from_json(json["set"]) + if json.get("set", None) is not None + else None + ), + was_thrown=( + bool(json["wasThrown"]) + if json.get("wasThrown", None) is not None + else None + ), + is_own=bool(json["isOwn"]) if json.get("isOwn", None) is not None else None, + symbol=( + RemoteObject.from_json(json["symbol"]) + if json.get("symbol", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class InternalPropertyDescriptor: + """ + Object internal property descriptor. This property isn't normally visible in JavaScript code. + """ + + #: Conventional property name. + name: str + + #: The value associated with the property. + value: typing.Optional[RemoteObject] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + if self.value is not None: + json["value"] = self.value.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InternalPropertyDescriptor: + return cls( + name=str(json["name"]), + value=( + RemoteObject.from_json(json["value"]) + if json.get("value", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class PrivatePropertyDescriptor: + """ + Object private field descriptor. + """ + + #: Private property name. + name: str + + #: The value associated with the private property. + value: typing.Optional[RemoteObject] = None + + #: A function which serves as a getter for the private property, + #: or ``undefined`` if there is no getter (accessor descriptors only). + get: typing.Optional[RemoteObject] = None + + #: A function which serves as a setter for the private property, + #: or ``undefined`` if there is no setter (accessor descriptors only). + set_: typing.Optional[RemoteObject] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + if self.value is not None: + json["value"] = self.value.to_json() + if self.get is not None: + json["get"] = self.get.to_json() + if self.set_ is not None: + json["set"] = self.set_.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> PrivatePropertyDescriptor: + return cls( + name=str(json["name"]), + value=( + RemoteObject.from_json(json["value"]) + if json.get("value", None) is not None + else None + ), + get=( + RemoteObject.from_json(json["get"]) + if json.get("get", None) is not None + else None + ), + set_=( + RemoteObject.from_json(json["set"]) + if json.get("set", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class CallArgument: + """ + Represents function call argument. Either remote object id ``objectId``, primitive ``value``, + unserializable primitive value or neither of (for undefined) them should be specified. + """ + + #: Primitive value or serializable javascript object. + value: typing.Optional[typing.Any] = None + + #: Primitive value which can not be JSON-stringified. + unserializable_value: typing.Optional[UnserializableValue] = None + + #: Remote object handle. + object_id: typing.Optional[RemoteObjectId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.value is not None: + json["value"] = self.value + if self.unserializable_value is not None: + json["unserializableValue"] = self.unserializable_value.to_json() + if self.object_id is not None: + json["objectId"] = self.object_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CallArgument: + return cls( + value=json["value"] if json.get("value", None) is not None else None, + unserializable_value=( + UnserializableValue.from_json(json["unserializableValue"]) + if json.get("unserializableValue", None) is not None + else None + ), + object_id=( + RemoteObjectId.from_json(json["objectId"]) + if json.get("objectId", None) is not None + else None + ), + )
+ + + +
+[docs] +class ExecutionContextId(int): + """ + Id of an execution context. + """ + + def to_json(self) -> int: + return self + + @classmethod + def from_json(cls, json: int) -> ExecutionContextId: + return cls(json) + + def __repr__(self): + return "ExecutionContextId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class ExecutionContextDescription: + """ + Description of an isolated world. + """ + + #: Unique id of the execution context. It can be used to specify in which execution context + #: script evaluation should be performed. + id_: ExecutionContextId + + #: Execution context origin. + origin: str + + #: Human readable name describing given context. + name: str + + #: A system-unique execution context identifier. Unlike the id, this is unique across + #: multiple processes, so can be reliably used to identify specific context while backend + #: performs a cross-process navigation. + unique_id: str + + #: Embedder-specific auxiliary data likely matching {isDefault: boolean, type: 'default'``'isolated'``'worker', frameId: string} + aux_data: typing.Optional[dict] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_.to_json() + json["origin"] = self.origin + json["name"] = self.name + json["uniqueId"] = self.unique_id + if self.aux_data is not None: + json["auxData"] = self.aux_data + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ExecutionContextDescription: + return cls( + id_=ExecutionContextId.from_json(json["id"]), + origin=str(json["origin"]), + name=str(json["name"]), + unique_id=str(json["uniqueId"]), + aux_data=( + dict(json["auxData"]) if json.get("auxData", None) is not None else None + ), + )
+ + + +
+[docs] +@dataclass +class ExceptionDetails: + """ + Detailed information about exception (or error) that was thrown during script compilation or + execution. + """ + + #: Exception id. + exception_id: int + + #: Exception text, which should be used together with exception object when available. + text: str + + #: Line number of the exception location (0-based). + line_number: int + + #: Column number of the exception location (0-based). + column_number: int + + #: Script ID of the exception location. + script_id: typing.Optional[ScriptId] = None + + #: URL of the exception location, to be used when the script was not reported. + url: typing.Optional[str] = None + + #: JavaScript stack trace if available. + stack_trace: typing.Optional[StackTrace] = None + + #: Exception object if available. + exception: typing.Optional[RemoteObject] = None + + #: Identifier of the context where exception happened. + execution_context_id: typing.Optional[ExecutionContextId] = None + + #: Dictionary with entries of meta data that the client associated + #: with this exception, such as information about associated network + #: requests, etc. + exception_meta_data: typing.Optional[dict] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["exceptionId"] = self.exception_id + json["text"] = self.text + json["lineNumber"] = self.line_number + json["columnNumber"] = self.column_number + if self.script_id is not None: + json["scriptId"] = self.script_id.to_json() + if self.url is not None: + json["url"] = self.url + if self.stack_trace is not None: + json["stackTrace"] = self.stack_trace.to_json() + if self.exception is not None: + json["exception"] = self.exception.to_json() + if self.execution_context_id is not None: + json["executionContextId"] = self.execution_context_id.to_json() + if self.exception_meta_data is not None: + json["exceptionMetaData"] = self.exception_meta_data + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ExceptionDetails: + return cls( + exception_id=int(json["exceptionId"]), + text=str(json["text"]), + line_number=int(json["lineNumber"]), + column_number=int(json["columnNumber"]), + script_id=( + ScriptId.from_json(json["scriptId"]) + if json.get("scriptId", None) is not None + else None + ), + url=str(json["url"]) if json.get("url", None) is not None else None, + stack_trace=( + StackTrace.from_json(json["stackTrace"]) + if json.get("stackTrace", None) is not None + else None + ), + exception=( + RemoteObject.from_json(json["exception"]) + if json.get("exception", None) is not None + else None + ), + execution_context_id=( + ExecutionContextId.from_json(json["executionContextId"]) + if json.get("executionContextId", None) is not None + else None + ), + exception_meta_data=( + dict(json["exceptionMetaData"]) + if json.get("exceptionMetaData", None) is not None + else None + ), + )
+ + + +
+[docs] +class Timestamp(float): + """ + Number of milliseconds since epoch. + """ + + def to_json(self) -> float: + return self + + @classmethod + def from_json(cls, json: float) -> Timestamp: + return cls(json) + + def __repr__(self): + return "Timestamp({})".format(super().__repr__())
+ + + +
+[docs] +class TimeDelta(float): + """ + Number of milliseconds. + """ + + def to_json(self) -> float: + return self + + @classmethod + def from_json(cls, json: float) -> TimeDelta: + return cls(json) + + def __repr__(self): + return "TimeDelta({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class CallFrame: + """ + Stack entry for runtime errors and assertions. + """ + + #: JavaScript function name. + function_name: str + + #: JavaScript script id. + script_id: ScriptId + + #: JavaScript script name or url. + url: str + + #: JavaScript script line number (0-based). + line_number: int + + #: JavaScript script column number (0-based). + column_number: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["functionName"] = self.function_name + json["scriptId"] = self.script_id.to_json() + json["url"] = self.url + json["lineNumber"] = self.line_number + json["columnNumber"] = self.column_number + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CallFrame: + return cls( + function_name=str(json["functionName"]), + script_id=ScriptId.from_json(json["scriptId"]), + url=str(json["url"]), + line_number=int(json["lineNumber"]), + column_number=int(json["columnNumber"]), + )
+ + + +
+[docs] +@dataclass +class StackTrace: + """ + Call frames for assertions or error messages. + """ + + #: JavaScript function name. + call_frames: typing.List[CallFrame] + + #: String label of this stack trace. For async traces this may be a name of the function that + #: initiated the async call. + description: typing.Optional[str] = None + + #: Asynchronous JavaScript stack trace that preceded this stack, if available. + parent: typing.Optional[StackTrace] = None + + #: Asynchronous JavaScript stack trace that preceded this stack, if available. + parent_id: typing.Optional[StackTraceId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["callFrames"] = [i.to_json() for i in self.call_frames] + if self.description is not None: + json["description"] = self.description + if self.parent is not None: + json["parent"] = self.parent.to_json() + if self.parent_id is not None: + json["parentId"] = self.parent_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StackTrace: + return cls( + call_frames=[CallFrame.from_json(i) for i in json["callFrames"]], + description=( + str(json["description"]) + if json.get("description", None) is not None + else None + ), + parent=( + StackTrace.from_json(json["parent"]) + if json.get("parent", None) is not None + else None + ), + parent_id=( + StackTraceId.from_json(json["parentId"]) + if json.get("parentId", None) is not None + else None + ), + )
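A ``StackTrace`` can chain into asynchronous parents via ``parent``. A minimal sketch, using illustrative frame data, that flattens the whole chain into readable lines:

    import typing
    from nodriver.cdp.runtime import StackTrace

    def format_stack(trace: typing.Optional[StackTrace]) -> typing.List[str]:
        lines = []
        while trace is not None:
            for frame in trace.call_frames:
                name = frame.function_name or "<anonymous>"
                lines.append(f"{frame.url}:{frame.line_number} in {name}")
            trace = trace.parent  # async trace that preceded this one, if reported
        return lines

    sample = StackTrace.from_json({
        "callFrames": [{
            "functionName": "boom",
            "scriptId": "42",
            "url": "https://example.com/app.js",
            "lineNumber": 10,
            "columnNumber": 2,
        }],
    })
    print(format_stack(sample))  # -> ['https://example.com/app.js:10 in boom']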
+ + + +
+[docs] +class UniqueDebuggerId(str): + """ + Unique identifier of current debugger. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> UniqueDebuggerId: + return cls(json) + + def __repr__(self): + return "UniqueDebuggerId({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class StackTraceId: + """ + If ``debuggerId`` is set, the stack trace comes from another debugger and can be resolved there. This + allows tracking cross-debugger calls. See ``Runtime.StackTrace`` and ``Debugger.paused`` for usage. + """ + + id_: str + + debugger_id: typing.Optional[UniqueDebuggerId] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["id"] = self.id_ + if self.debugger_id is not None: + json["debuggerId"] = self.debugger_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StackTraceId: + return cls( + id_=str(json["id"]), + debugger_id=( + UniqueDebuggerId.from_json(json["debuggerId"]) + if json.get("debuggerId", None) is not None + else None + ), + )
+ + + +
+[docs] +def await_promise( + promise_object_id: RemoteObjectId, + return_by_value: typing.Optional[bool] = None, + generate_preview: typing.Optional[bool] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]], +]: + """ + Add handler to promise with given promise object id. + + :param promise_object_id: Identifier of the promise. + :param return_by_value: *(Optional)* Whether the result is expected to be a JSON object that should be sent by value. + :param generate_preview: *(Optional)* Whether preview should be generated for the result. + :returns: A tuple with the following items: + + 0. **result** - Promise result. Will contain rejected value if promise was rejected. + 1. **exceptionDetails** - *(Optional)* Exception details if stack trace is available. + """ + params: T_JSON_DICT = dict() + params["promiseObjectId"] = promise_object_id.to_json() + if return_by_value is not None: + params["returnByValue"] = return_by_value + if generate_preview is not None: + params["generatePreview"] = generate_preview + cmd_dict: T_JSON_DICT = { + "method": "Runtime.awaitPromise", + "params": params, + } + json = yield cmd_dict + return ( + RemoteObject.from_json(json["result"]), + ( + ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + ), + )
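Every command wrapper in this module follows the same shape: a generator that yields exactly one JSON-RPC request dict and returns its parsed result once the response dict is sent back in. A minimal sketch, driven by hand with a stubbed response (no browser involved, values illustrative):

    from nodriver.cdp import runtime

    gen = runtime.await_promise(runtime.RemoteObjectId("42"), return_by_value=True)
    request = gen.send(None)               # first yield: the command to send over CDP
    print(request["method"], request["params"])
    # -> Runtime.awaitPromise {'promiseObjectId': '42', 'returnByValue': True}

    stub_response = {"result": {"type": "string", "value": "done"}}
    try:
        gen.send(stub_response)            # feed the (stubbed) CDP response back in
    except StopIteration as stop:
        result, exc_details = stop.value   # the wrapper's return value
        print(result.value, exc_details)   # -> done None

In practice a transport (such as nodriver's connection layer) performs this send/receive dance for you.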
+ + + +
+[docs] +def call_function_on( + function_declaration: str, + object_id: typing.Optional[RemoteObjectId] = None, + arguments: typing.Optional[typing.List[CallArgument]] = None, + silent: typing.Optional[bool] = None, + return_by_value: typing.Optional[bool] = None, + generate_preview: typing.Optional[bool] = None, + user_gesture: typing.Optional[bool] = None, + await_promise: typing.Optional[bool] = None, + execution_context_id: typing.Optional[ExecutionContextId] = None, + object_group: typing.Optional[str] = None, + throw_on_side_effect: typing.Optional[bool] = None, + unique_context_id: typing.Optional[str] = None, + serialization_options: typing.Optional[SerializationOptions] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]], +]: + """ + Calls function with given declaration on the given object. Object group of the result is + inherited from the target object. + + :param function_declaration: Declaration of the function to call. + :param object_id: *(Optional)* Identifier of the object to call function on. Either objectId or executionContextId should be specified. + :param arguments: *(Optional)* Call arguments. All call arguments must belong to the same JavaScript world as the target object. + :param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state. + :param return_by_value: *(Optional)* Whether the result is expected to be a JSON object which should be sent by value. Can be overriden by ````serializationOptions````. + :param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the result. + :param user_gesture: *(Optional)* Whether execution should be treated as initiated by user in the UI. + :param await_promise: *(Optional)* Whether execution should ````await```` for resulting value and return once awaited promise is resolved. + :param execution_context_id: *(Optional)* Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified. + :param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object. + :param throw_on_side_effect: **(EXPERIMENTAL)** *(Optional)* Whether to throw an exception if side effect cannot be ruled out during evaluation. + :param unique_context_id: **(EXPERIMENTAL)** *(Optional)* An alternative way to specify the execution context to call function on. Compared to contextId that may be reused across processes, this is guaranteed to be system-unique, so it can be used to prevent accidental function call in context different than intended (e.g. as a result of navigation across process boundaries). This is mutually exclusive with ````executionContextId````. + :param serialization_options: **(EXPERIMENTAL)** *(Optional)* Specifies the result serialization. If provided, overrides ````generatePreview```` and ````returnByValue```. + :returns: A tuple with the following items: + + 0. **result** - Call result. + 1. **exceptionDetails** - *(Optional)* Exception details. 
+ """ + params: T_JSON_DICT = dict() + params["functionDeclaration"] = function_declaration + if object_id is not None: + params["objectId"] = object_id.to_json() + if arguments is not None: + params["arguments"] = [i.to_json() for i in arguments] + if silent is not None: + params["silent"] = silent + if return_by_value is not None: + params["returnByValue"] = return_by_value + if generate_preview is not None: + params["generatePreview"] = generate_preview + if user_gesture is not None: + params["userGesture"] = user_gesture + if await_promise is not None: + params["awaitPromise"] = await_promise + if execution_context_id is not None: + params["executionContextId"] = execution_context_id.to_json() + if object_group is not None: + params["objectGroup"] = object_group + if throw_on_side_effect is not None: + params["throwOnSideEffect"] = throw_on_side_effect + if unique_context_id is not None: + params["uniqueContextId"] = unique_context_id + if serialization_options is not None: + params["serializationOptions"] = serialization_options.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Runtime.callFunctionOn", + "params": params, + } + json = yield cmd_dict + return ( + RemoteObject.from_json(json["result"]), + ( + ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + ), + )
+ + + +
+[docs] +def compile_script( + expression: str, + source_url: str, + persist_script: bool, + execution_context_id: typing.Optional[ExecutionContextId] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[typing.Optional[ScriptId], typing.Optional[ExceptionDetails]], +]: + """ + Compiles expression. + + :param expression: Expression to compile. + :param source_url: Source url to be set for the script. + :param persist_script: Specifies whether the compiled script should be persisted. + :param execution_context_id: *(Optional)* Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page. + :returns: A tuple with the following items: + + 0. **scriptId** - *(Optional)* Id of the script. + 1. **exceptionDetails** - *(Optional)* Exception details. + """ + params: T_JSON_DICT = dict() + params["expression"] = expression + params["sourceURL"] = source_url + params["persistScript"] = persist_script + if execution_context_id is not None: + params["executionContextId"] = execution_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Runtime.compileScript", + "params": params, + } + json = yield cmd_dict + return ( + ( + ScriptId.from_json(json["scriptId"]) + if json.get("scriptId", None) is not None + else None + ), + ( + ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + ), + )
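``compile_script`` pairs naturally with ``run_script`` further below: compile once, run many times. A sketch assuming a connected nodriver ``tab`` as in the ``call_function_on`` example above; the source URL is illustrative.

    from nodriver import cdp

    async def compile_and_run(tab):
        script_id, exc = await tab.send(cdp.runtime.compile_script(
            expression="1 + 2",
            source_url="snippet://adder.js",
            persist_script=True,       # keep it so run_script can reference it
        ))
        if exc is not None:
            raise RuntimeError(exc.text)
        result, exc = await tab.send(cdp.runtime.run_script(script_id, return_by_value=True))
        return result.value            # -> 3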
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables reporting of execution contexts creation. + """ + cmd_dict: T_JSON_DICT = { + "method": "Runtime.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def discard_console_entries() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Discards collected exceptions and console API calls. + """ + cmd_dict: T_JSON_DICT = { + "method": "Runtime.discardConsoleEntries", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables reporting of execution contexts creation by means of ``executionContextCreated`` event. + When the reporting gets enabled the event will be sent immediately for each existing execution + context. + """ + cmd_dict: T_JSON_DICT = { + "method": "Runtime.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def evaluate( + expression: str, + object_group: typing.Optional[str] = None, + include_command_line_api: typing.Optional[bool] = None, + silent: typing.Optional[bool] = None, + context_id: typing.Optional[ExecutionContextId] = None, + return_by_value: typing.Optional[bool] = None, + generate_preview: typing.Optional[bool] = None, + user_gesture: typing.Optional[bool] = None, + await_promise: typing.Optional[bool] = None, + throw_on_side_effect: typing.Optional[bool] = None, + timeout: typing.Optional[TimeDelta] = None, + disable_breaks: typing.Optional[bool] = None, + repl_mode: typing.Optional[bool] = None, + allow_unsafe_eval_blocked_by_csp: typing.Optional[bool] = None, + unique_context_id: typing.Optional[str] = None, + serialization_options: typing.Optional[SerializationOptions] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]], +]: + """ + Evaluates expression on global object. + + :param expression: Expression to evaluate. + :param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects. + :param include_command_line_api: *(Optional)* Determines whether Command Line API should be available during the evaluation. + :param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state. + :param context_id: *(Optional)* Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page. This is mutually exclusive with ````uniqueContextId````, which offers an alternative way to identify the execution context that is more reliable in a multi-process environment. + :param return_by_value: *(Optional)* Whether the result is expected to be a JSON object that should be sent by value. + :param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the result. + :param user_gesture: *(Optional)* Whether execution should be treated as initiated by user in the UI. + :param await_promise: *(Optional)* Whether execution should ````await```` for resulting value and return once awaited promise is resolved. + :param throw_on_side_effect: **(EXPERIMENTAL)** *(Optional)* Whether to throw an exception if side effect cannot be ruled out during evaluation. This implies ````disableBreaks```` below. + :param timeout: **(EXPERIMENTAL)** *(Optional)* Terminate execution after timing out (number of milliseconds). + :param disable_breaks: **(EXPERIMENTAL)** *(Optional)* Disable breakpoints during execution. + :param repl_mode: **(EXPERIMENTAL)** *(Optional)* Setting this flag to true enables ````let```` re-declaration and top-level ````await````. Note that ````let```` variables can only be re-declared if they originate from ````replMode```` themselves. + :param allow_unsafe_eval_blocked_by_csp: **(EXPERIMENTAL)** *(Optional)* The Content Security Policy (CSP) for the target might block 'unsafe-eval' which includes eval(), Function(), setTimeout() and setInterval() when called with non-callable arguments. This flag bypasses CSP for this evaluation and allows unsafe-eval. Defaults to true. + :param unique_context_id: **(EXPERIMENTAL)** *(Optional)* An alternative way to specify the execution context to evaluate in. 
Compared to contextId that may be reused across processes, this is guaranteed to be system-unique, so it can be used to prevent accidental evaluation of the expression in context different than intended (e.g. as a result of navigation across process boundaries). This is mutually exclusive with ````contextId````. + :param serialization_options: **(EXPERIMENTAL)** *(Optional)* Specifies the result serialization. If provided, overrides ````generatePreview```` and ````returnByValue```. + :returns: A tuple with the following items: + + 0. **result** - Evaluation result. + 1. **exceptionDetails** - *(Optional)* Exception details. + """ + params: T_JSON_DICT = dict() + params["expression"] = expression + if object_group is not None: + params["objectGroup"] = object_group + if include_command_line_api is not None: + params["includeCommandLineAPI"] = include_command_line_api + if silent is not None: + params["silent"] = silent + if context_id is not None: + params["contextId"] = context_id.to_json() + if return_by_value is not None: + params["returnByValue"] = return_by_value + if generate_preview is not None: + params["generatePreview"] = generate_preview + if user_gesture is not None: + params["userGesture"] = user_gesture + if await_promise is not None: + params["awaitPromise"] = await_promise + if throw_on_side_effect is not None: + params["throwOnSideEffect"] = throw_on_side_effect + if timeout is not None: + params["timeout"] = timeout.to_json() + if disable_breaks is not None: + params["disableBreaks"] = disable_breaks + if repl_mode is not None: + params["replMode"] = repl_mode + if allow_unsafe_eval_blocked_by_csp is not None: + params["allowUnsafeEvalBlockedByCSP"] = allow_unsafe_eval_blocked_by_csp + if unique_context_id is not None: + params["uniqueContextId"] = unique_context_id + if serialization_options is not None: + params["serializationOptions"] = serialization_options.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Runtime.evaluate", + "params": params, + } + json = yield cmd_dict + return ( + RemoteObject.from_json(json["result"]), + ( + ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + ), + )
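A short ``evaluate`` sketch, again assuming a connected nodriver ``tab`` whose ``send()`` drives the generator; the expression is illustrative. ``await_promise`` and ``return_by_value`` together turn a promise-returning expression into a plain JSON value.

    from nodriver import cdp

    async def page_status(tab):
        result, exc = await tab.send(cdp.runtime.evaluate(
            "fetch(location.href).then(r => r.status)",
            await_promise=True,      # resolve the promise before returning
            return_by_value=True,    # plain value instead of an object handle
        ))
        if exc is not None:
            raise RuntimeError(exc.text)
        return result.value          # e.g. 200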
+ + + +
+[docs] +def get_isolate_id() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, str]: + """ + Returns the isolate id. + + **EXPERIMENTAL** + + :returns: The isolate id. + """ + cmd_dict: T_JSON_DICT = { + "method": "Runtime.getIsolateId", + } + json = yield cmd_dict + return str(json["id"])
+ + + +
+[docs] +def get_heap_usage() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[float, float]] +): + """ + Returns the JavaScript heap usage. + It is the total usage of the corresponding isolate not scoped to a particular Runtime. + + **EXPERIMENTAL** + + :returns: A tuple with the following items: + + 0. **usedSize** - Used heap size in bytes. + 1. **totalSize** - Allocated heap size in bytes. + """ + cmd_dict: T_JSON_DICT = { + "method": "Runtime.getHeapUsage", + } + json = yield cmd_dict + return (float(json["usedSize"]), float(json["totalSize"]))
+ + + +
+[docs] +def get_properties( + object_id: RemoteObjectId, + own_properties: typing.Optional[bool] = None, + accessor_properties_only: typing.Optional[bool] = None, + generate_preview: typing.Optional[bool] = None, + non_indexed_properties_only: typing.Optional[bool] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[ + typing.List[PropertyDescriptor], + typing.Optional[typing.List[InternalPropertyDescriptor]], + typing.Optional[typing.List[PrivatePropertyDescriptor]], + typing.Optional[ExceptionDetails], + ], +]: + """ + Returns properties of a given object. Object group of the result is inherited from the target + object. + + :param object_id: Identifier of the object to return properties for. + :param own_properties: *(Optional)* If true, returns properties belonging only to the element itself, not to its prototype chain. + :param accessor_properties_only: **(EXPERIMENTAL)** *(Optional)* If true, returns accessor properties (with getter/setter) only; internal properties are not returned either. + :param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the results. + :param non_indexed_properties_only: **(EXPERIMENTAL)** *(Optional)* If true, returns non-indexed properties only. + :returns: A tuple with the following items: + + 0. **result** - Object properties. + 1. **internalProperties** - *(Optional)* Internal object properties (only of the element itself). + 2. **privateProperties** - *(Optional)* Object private properties. + 3. **exceptionDetails** - *(Optional)* Exception details. + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + if own_properties is not None: + params["ownProperties"] = own_properties + if accessor_properties_only is not None: + params["accessorPropertiesOnly"] = accessor_properties_only + if generate_preview is not None: + params["generatePreview"] = generate_preview + if non_indexed_properties_only is not None: + params["nonIndexedPropertiesOnly"] = non_indexed_properties_only + cmd_dict: T_JSON_DICT = { + "method": "Runtime.getProperties", + "params": params, + } + json = yield cmd_dict + return ( + [PropertyDescriptor.from_json(i) for i in json["result"]], + ( + [ + InternalPropertyDescriptor.from_json(i) + for i in json["internalProperties"] + ] + if json.get("internalProperties", None) is not None + else None + ), + ( + [PrivatePropertyDescriptor.from_json(i) for i in json["privateProperties"]] + if json.get("privateProperties", None) is not None + else None + ), + ( + ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + ), + )
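Sketch: enumerating the own properties of a remote object obtained from ``evaluate``. Assumes a connected nodriver ``tab`` as in the examples above.

    from nodriver import cdp

    async def dump_navigator(tab):
        obj, _ = await tab.send(cdp.runtime.evaluate("navigator"))
        props, internal, private, exc = await tab.send(
            cdp.runtime.get_properties(obj.object_id, own_properties=True)
        )
        for p in props:
            # p.value is absent for accessor-only properties
            print(p.name, getattr(p.value, "description", None))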
+ + + +
+[docs] +def global_lexical_scope_names( + execution_context_id: typing.Optional[ExecutionContextId] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]]: + """ + Returns all let, const and class variables from global scope. + + :param execution_context_id: *(Optional)* Specifies in which execution context to lookup global scope variables. + :returns: + """ + params: T_JSON_DICT = dict() + if execution_context_id is not None: + params["executionContextId"] = execution_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Runtime.globalLexicalScopeNames", + "params": params, + } + json = yield cmd_dict + return [str(i) for i in json["names"]]
+ + + +
+[docs] +def query_objects( + prototype_object_id: RemoteObjectId, object_group: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, RemoteObject]: + """ + :param prototype_object_id: Identifier of the prototype to return objects for. + :param object_group: *(Optional)* Symbolic group name that can be used to release the results. + :returns: Array with objects. + """ + params: T_JSON_DICT = dict() + params["prototypeObjectId"] = prototype_object_id.to_json() + if object_group is not None: + params["objectGroup"] = object_group + cmd_dict: T_JSON_DICT = { + "method": "Runtime.queryObjects", + "params": params, + } + json = yield cmd_dict + return RemoteObject.from_json(json["objects"])
+ + + +
+[docs] +def release_object( + object_id: RemoteObjectId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Releases remote object with given id. + + :param object_id: Identifier of the object to release. + """ + params: T_JSON_DICT = dict() + params["objectId"] = object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Runtime.releaseObject", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def release_object_group( + object_group: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Releases all remote objects that belong to a given group. + + :param object_group: Symbolic object group name. + """ + params: T_JSON_DICT = dict() + params["objectGroup"] = object_group + cmd_dict: T_JSON_DICT = { + "method": "Runtime.releaseObjectGroup", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def run_if_waiting_for_debugger() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Tells inspected instance to run if it was waiting for debugger to attach. + """ + cmd_dict: T_JSON_DICT = { + "method": "Runtime.runIfWaitingForDebugger", + } + json = yield cmd_dict
+ + + +
+[docs] +def run_script( + script_id: ScriptId, + execution_context_id: typing.Optional[ExecutionContextId] = None, + object_group: typing.Optional[str] = None, + silent: typing.Optional[bool] = None, + include_command_line_api: typing.Optional[bool] = None, + return_by_value: typing.Optional[bool] = None, + generate_preview: typing.Optional[bool] = None, + await_promise: typing.Optional[bool] = None, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]], +]: + """ + Runs script with given id in a given context. + + :param script_id: Id of the script to run. + :param execution_context_id: *(Optional)* Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page. + :param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects. + :param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state. + :param include_command_line_api: *(Optional)* Determines whether Command Line API should be available during the evaluation. + :param return_by_value: *(Optional)* Whether the result is expected to be a JSON object which should be sent by value. + :param generate_preview: *(Optional)* Whether preview should be generated for the result. + :param await_promise: *(Optional)* Whether execution should ````await``` for resulting value and return once awaited promise is resolved. + :returns: A tuple with the following items: + + 0. **result** - Run result. + 1. **exceptionDetails** - *(Optional)* Exception details. + """ + params: T_JSON_DICT = dict() + params["scriptId"] = script_id.to_json() + if execution_context_id is not None: + params["executionContextId"] = execution_context_id.to_json() + if object_group is not None: + params["objectGroup"] = object_group + if silent is not None: + params["silent"] = silent + if include_command_line_api is not None: + params["includeCommandLineAPI"] = include_command_line_api + if return_by_value is not None: + params["returnByValue"] = return_by_value + if generate_preview is not None: + params["generatePreview"] = generate_preview + if await_promise is not None: + params["awaitPromise"] = await_promise + cmd_dict: T_JSON_DICT = { + "method": "Runtime.runScript", + "params": params, + } + json = yield cmd_dict + return ( + RemoteObject.from_json(json["result"]), + ( + ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + ), + )
+ + + +
+[docs] +def set_async_call_stack_depth( + max_depth: int, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables or disables async call stacks tracking. + + :param max_depth: Maximum depth of async call stacks. Setting to ```0``` will effectively disable collecting async call stacks (default). + """ + params: T_JSON_DICT = dict() + params["maxDepth"] = max_depth + cmd_dict: T_JSON_DICT = { + "method": "Runtime.setAsyncCallStackDepth", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_custom_object_formatter_enabled( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + + + **EXPERIMENTAL** + + :param enabled: + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Runtime.setCustomObjectFormatterEnabled", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_max_call_stack_size_to_capture( + size: int, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + + + **EXPERIMENTAL** + + :param size: + """ + params: T_JSON_DICT = dict() + params["size"] = size + cmd_dict: T_JSON_DICT = { + "method": "Runtime.setMaxCallStackSizeToCapture", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def terminate_execution() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Terminate current or next JavaScript execution. + Will cancel the termination when the outer-most script execution ends. + + **EXPERIMENTAL** + """ + cmd_dict: T_JSON_DICT = { + "method": "Runtime.terminateExecution", + } + json = yield cmd_dict
+ + + +
+[docs] +def add_binding( + name: str, + execution_context_id: typing.Optional[ExecutionContextId] = None, + execution_context_name: typing.Optional[str] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + If executionContextId is empty, adds binding with the given name on the + global objects of all inspected contexts, including those created later, + bindings survive reloads. + Binding function takes exactly one argument, this argument should be string, + in case of any other input, function throws an exception. + Each binding function call produces Runtime.bindingCalled notification. + + :param name: + :param execution_context_id: **(DEPRECATED)** **(EXPERIMENTAL)** *(Optional)* If specified, the binding would only be exposed to the specified execution context. If omitted and ```executionContextName```` is not set, the binding is exposed to all execution contexts of the target. This parameter is mutually exclusive with ````executionContextName````. Deprecated in favor of ````executionContextName```` due to an unclear use case and bugs in implementation (crbug.com/1169639). ````executionContextId```` will be removed in the future. + :param execution_context_name: *(Optional)* If specified, the binding is exposed to the executionContext with matching name, even for contexts created after the binding is added. See also ````ExecutionContext.name```` and ````worldName```` parameter to ````Page.addScriptToEvaluateOnNewDocument````. This parameter is mutually exclusive with ````executionContextId```. + """ + params: T_JSON_DICT = dict() + params["name"] = name + if execution_context_id is not None: + params["executionContextId"] = execution_context_id.to_json() + if execution_context_name is not None: + params["executionContextName"] = execution_context_name + cmd_dict: T_JSON_DICT = { + "method": "Runtime.addBinding", + "params": params, + } + json = yield cmd_dict
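Sketch: expose a binding to the page and react when the page calls it. This assumes a connected nodriver ``tab`` and nodriver's ``Tab.add_handler()`` for CDP events (both assumptions); the binding name is illustrative, and enabling the Runtime domain is typically required for its events to arrive.

    from nodriver import cdp

    async def install_binding(tab):
        def on_binding(event: cdp.runtime.BindingCalled):
            print("page sent:", event.name, event.payload)

        tab.add_handler(cdp.runtime.BindingCalled, on_binding)
        await tab.send(cdp.runtime.enable())
        await tab.send(cdp.runtime.add_binding("report"))
        # In page JS, window.report("...") now triggers Runtime.bindingCalled.
        await tab.send(cdp.runtime.evaluate("window.report('hello from the page')"))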
+ + + +
+[docs] +def remove_binding(name: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + This method does not remove binding function from global object but + unsubscribes current runtime agent from Runtime.bindingCalled notifications. + + :param name: + """ + params: T_JSON_DICT = dict() + params["name"] = name + cmd_dict: T_JSON_DICT = { + "method": "Runtime.removeBinding", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_exception_details( + error_object_id: RemoteObjectId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Optional[ExceptionDetails]]: + """ + This method tries to lookup and populate exception details for a + JavaScript Error object. + Note that the stackTrace portion of the resulting exceptionDetails will + only be populated if the Runtime domain was enabled at the time when the + Error was thrown. + + **EXPERIMENTAL** + + :param error_object_id: The error object for which to resolve the exception details. + :returns: + """ + params: T_JSON_DICT = dict() + params["errorObjectId"] = error_object_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Runtime.getExceptionDetails", + "params": params, + } + json = yield cmd_dict + return ( + ExceptionDetails.from_json(json["exceptionDetails"]) + if json.get("exceptionDetails", None) is not None + else None + )
+ + + +
+[docs] +@event_class("Runtime.bindingCalled") +@dataclass +class BindingCalled: + """ + **EXPERIMENTAL** + + Notification is issued every time when binding is called. + """ + + name: str + payload: str + #: Identifier of the context where the call was made. + execution_context_id: ExecutionContextId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BindingCalled: + return cls( + name=str(json["name"]), + payload=str(json["payload"]), + execution_context_id=ExecutionContextId.from_json( + json["executionContextId"] + ), + )
+ + + +
+[docs] +@event_class("Runtime.consoleAPICalled") +@dataclass +class ConsoleAPICalled: + """ + Issued when console API was called. + """ + + #: Type of the call. + type_: str + #: Call arguments. + args: typing.List[RemoteObject] + #: Identifier of the context where the call was made. + execution_context_id: ExecutionContextId + #: Call timestamp. + timestamp: Timestamp + #: Stack trace captured when the call was made. The async stack chain is automatically reported for + #: the following call types: ``assert``, ``error``, ``trace``, ``warning``. For other types the async call + #: chain can be retrieved using ``Debugger.getStackTrace`` and ``stackTrace.parentId`` field. + stack_trace: typing.Optional[StackTrace] + #: Console context descriptor for calls on non-default console context (not console.*): + #: 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call + #: on named context. + context: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ConsoleAPICalled: + return cls( + type_=str(json["type"]), + args=[RemoteObject.from_json(i) for i in json["args"]], + execution_context_id=ExecutionContextId.from_json( + json["executionContextId"] + ), + timestamp=Timestamp.from_json(json["timestamp"]), + stack_trace=( + StackTrace.from_json(json["stackTrace"]) + if json.get("stackTrace", None) is not None + else None + ), + context=( + str(json["context"]) if json.get("context", None) is not None else None + ), + )
+ + + +
+[docs] +@event_class("Runtime.exceptionRevoked") +@dataclass +class ExceptionRevoked: + """ + Issued when unhandled exception was revoked. + """ + + #: Reason describing why exception was revoked. + reason: str + #: The id of revoked exception, as reported in ``exceptionThrown``. + exception_id: int + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ExceptionRevoked: + return cls(reason=str(json["reason"]), exception_id=int(json["exceptionId"]))
+ + + +
+[docs] +@event_class("Runtime.exceptionThrown") +@dataclass +class ExceptionThrown: + """ + Issued when exception was thrown and unhandled. + """ + + #: Timestamp of the exception. + timestamp: Timestamp + exception_details: ExceptionDetails + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ExceptionThrown: + return cls( + timestamp=Timestamp.from_json(json["timestamp"]), + exception_details=ExceptionDetails.from_json(json["exceptionDetails"]), + )
+ + + +
+[docs] +@event_class("Runtime.executionContextCreated") +@dataclass +class ExecutionContextCreated: + """ + Issued when new execution context is created. + """ + + #: A newly created execution context. + context: ExecutionContextDescription + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ExecutionContextCreated: + return cls(context=ExecutionContextDescription.from_json(json["context"]))
+ + + +
+[docs] +@event_class("Runtime.executionContextDestroyed") +@dataclass +class ExecutionContextDestroyed: + """ + Issued when execution context is destroyed. + """ + + #: Id of the destroyed context + execution_context_id: ExecutionContextId + #: Unique Id of the destroyed context + execution_context_unique_id: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ExecutionContextDestroyed: + return cls( + execution_context_id=ExecutionContextId.from_json( + json["executionContextId"] + ), + execution_context_unique_id=str(json["executionContextUniqueId"]), + )
+ + + +
+[docs] +@event_class("Runtime.executionContextsCleared") +@dataclass +class ExecutionContextsCleared: + """ + Issued when all executionContexts were cleared in browser + """ + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ExecutionContextsCleared: + return cls()
+ + + +
+[docs] +@event_class("Runtime.inspectRequested") +@dataclass +class InspectRequested: + """ + Issued when object should be inspected (for example, as a result of inspect() command line API + call). + """ + + object_: RemoteObject + hints: dict + #: Identifier of the context where the call was made. + execution_context_id: typing.Optional[ExecutionContextId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InspectRequested: + return cls( + object_=RemoteObject.from_json(json["object"]), + hints=dict(json["hints"]), + execution_context_id=( + ExecutionContextId.from_json(json["executionContextId"]) + if json.get("executionContextId", None) is not None + else None + ), + )
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/schema.html b/docs/_build/html/_modules/nodriver/cdp/schema.html
new file mode 100644
index 0000000..fb6eb37
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/schema.html
@@ -0,0 +1,359 @@
+ nodriver.cdp.schema - nodriver documentation

Source code for nodriver.cdp.schema

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Schema
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +@dataclass +class Domain: + """ + Description of the protocol domain. + """ + + #: Domain name. + name: str + + #: Domain version. + version: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["name"] = self.name + json["version"] = self.version + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Domain: + return cls( + name=str(json["name"]), + version=str(json["version"]), + )
+ + + +
+[docs] +def get_domains() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Domain]]: + """ + Returns supported domains. + + :returns: List of supported domains. + """ + cmd_dict: T_JSON_DICT = { + "method": "Schema.getDomains", + } + json = yield cmd_dict + return [Domain.from_json(i) for i in json["domains"]]
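Sketch: listing the protocol domains the connected browser reports. Assumes a connected nodriver ``tab`` whose ``send()`` drives these generators, as in the runtime examples earlier.

    from nodriver import cdp

    async def list_domains(tab):
        domains = await tab.send(cdp.schema.get_domains())
        for d in sorted(domains, key=lambda d: d.name):
            print(d.name, d.version)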
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/security.html b/docs/_build/html/_modules/nodriver/cdp/security.html
new file mode 100644
index 0000000..1ec9000
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/security.html
@@ -0,0 +1,922 @@
+ nodriver.cdp.security - nodriver documentation

Source code for nodriver.cdp.security

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Security
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import network
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +class CertificateId(int): + """ + An internal certificate ID value. + """ + + def to_json(self) -> int: + return self + + @classmethod + def from_json(cls, json: int) -> CertificateId: + return cls(json) + + def __repr__(self): + return "CertificateId({})".format(super().__repr__())
+ + + +
+[docs] +class MixedContentType(enum.Enum): + """ + A description of mixed content (HTTP resources on HTTPS pages), as defined by + https://www.w3.org/TR/mixed-content/#categories + """ + + BLOCKABLE = "blockable" + OPTIONALLY_BLOCKABLE = "optionally-blockable" + NONE = "none" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> MixedContentType: + return cls(json)
+ + + +
+[docs] +class SecurityState(enum.Enum): + """ + The security level of a page or resource. + """ + + UNKNOWN = "unknown" + NEUTRAL = "neutral" + INSECURE = "insecure" + SECURE = "secure" + INFO = "info" + INSECURE_BROKEN = "insecure-broken" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SecurityState: + return cls(json)
+ + + +
+[docs] +@dataclass +class CertificateSecurityState: + """ + Details about the security state of the page certificate. + """ + + #: Protocol name (e.g. "TLS 1.2" or "QUIC"). + protocol: str + + #: Key Exchange used by the connection, or the empty string if not applicable. + key_exchange: str + + #: Cipher name. + cipher: str + + #: Page certificate. + certificate: typing.List[str] + + #: Certificate subject name. + subject_name: str + + #: Name of the issuing CA. + issuer: str + + #: Certificate valid from date. + valid_from: network.TimeSinceEpoch + + #: Certificate valid to (expiration) date + valid_to: network.TimeSinceEpoch + + #: True if the certificate uses a weak signature algorithm. + certificate_has_weak_signature: bool + + #: True if the certificate has a SHA1 signature in the chain. + certificate_has_sha1_signature: bool + + #: True if modern SSL + modern_ssl: bool + + #: True if the connection is using an obsolete SSL protocol. + obsolete_ssl_protocol: bool + + #: True if the connection is using an obsolete SSL key exchange. + obsolete_ssl_key_exchange: bool + + #: True if the connection is using an obsolete SSL cipher. + obsolete_ssl_cipher: bool + + #: True if the connection is using an obsolete SSL signature. + obsolete_ssl_signature: bool + + #: (EC)DH group used by the connection, if applicable. + key_exchange_group: typing.Optional[str] = None + + #: TLS MAC. Note that AEAD ciphers do not have separate MACs. + mac: typing.Optional[str] = None + + #: The highest priority network error code, if the certificate has an error. + certificate_network_error: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["protocol"] = self.protocol + json["keyExchange"] = self.key_exchange + json["cipher"] = self.cipher + json["certificate"] = [i for i in self.certificate] + json["subjectName"] = self.subject_name + json["issuer"] = self.issuer + json["validFrom"] = self.valid_from.to_json() + json["validTo"] = self.valid_to.to_json() + json["certificateHasWeakSignature"] = self.certificate_has_weak_signature + json["certificateHasSha1Signature"] = self.certificate_has_sha1_signature + json["modernSSL"] = self.modern_ssl + json["obsoleteSslProtocol"] = self.obsolete_ssl_protocol + json["obsoleteSslKeyExchange"] = self.obsolete_ssl_key_exchange + json["obsoleteSslCipher"] = self.obsolete_ssl_cipher + json["obsoleteSslSignature"] = self.obsolete_ssl_signature + if self.key_exchange_group is not None: + json["keyExchangeGroup"] = self.key_exchange_group + if self.mac is not None: + json["mac"] = self.mac + if self.certificate_network_error is not None: + json["certificateNetworkError"] = self.certificate_network_error + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CertificateSecurityState: + return cls( + protocol=str(json["protocol"]), + key_exchange=str(json["keyExchange"]), + cipher=str(json["cipher"]), + certificate=[str(i) for i in json["certificate"]], + subject_name=str(json["subjectName"]), + issuer=str(json["issuer"]), + valid_from=network.TimeSinceEpoch.from_json(json["validFrom"]), + valid_to=network.TimeSinceEpoch.from_json(json["validTo"]), + certificate_has_weak_signature=bool(json["certificateHasWeakSignature"]), + certificate_has_sha1_signature=bool(json["certificateHasSha1Signature"]), + modern_ssl=bool(json["modernSSL"]), + obsolete_ssl_protocol=bool(json["obsoleteSslProtocol"]), + obsolete_ssl_key_exchange=bool(json["obsoleteSslKeyExchange"]), + obsolete_ssl_cipher=bool(json["obsoleteSslCipher"]), + 
obsolete_ssl_signature=bool(json["obsoleteSslSignature"]), + key_exchange_group=( + str(json["keyExchangeGroup"]) + if json.get("keyExchangeGroup", None) is not None + else None + ), + mac=str(json["mac"]) if json.get("mac", None) is not None else None, + certificate_network_error=( + str(json["certificateNetworkError"]) + if json.get("certificateNetworkError", None) is not None + else None + ), + )
+ + + +
+[docs] +class SafetyTipStatus(enum.Enum): + BAD_REPUTATION = "badReputation" + LOOKALIKE = "lookalike" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SafetyTipStatus: + return cls(json)
+ + + +
+[docs] +@dataclass +class SafetyTipInfo: + #: Describes whether the page triggers any safety tips or reputation warnings. Default is unknown. + safety_tip_status: SafetyTipStatus + + #: The URL the safety tip suggested ("Did you mean?"). Only filled in for lookalike matches. + safe_url: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["safetyTipStatus"] = self.safety_tip_status.to_json() + if self.safe_url is not None: + json["safeUrl"] = self.safe_url + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SafetyTipInfo: + return cls( + safety_tip_status=SafetyTipStatus.from_json(json["safetyTipStatus"]), + safe_url=( + str(json["safeUrl"]) if json.get("safeUrl", None) is not None else None + ), + )
+ + + +
+[docs] +@dataclass +class VisibleSecurityState: + """ + Security state information about the page. + """ + + #: The security level of the page. + security_state: SecurityState + + #: Array of security state issues ids. + security_state_issue_ids: typing.List[str] + + #: Security state details about the page certificate. + certificate_security_state: typing.Optional[CertificateSecurityState] = None + + #: The type of Safety Tip triggered on the page. Note that this field will be set even if the Safety Tip UI was not actually shown. + safety_tip_info: typing.Optional[SafetyTipInfo] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["securityState"] = self.security_state.to_json() + json["securityStateIssueIds"] = [i for i in self.security_state_issue_ids] + if self.certificate_security_state is not None: + json["certificateSecurityState"] = self.certificate_security_state.to_json() + if self.safety_tip_info is not None: + json["safetyTipInfo"] = self.safety_tip_info.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> VisibleSecurityState: + return cls( + security_state=SecurityState.from_json(json["securityState"]), + security_state_issue_ids=[str(i) for i in json["securityStateIssueIds"]], + certificate_security_state=( + CertificateSecurityState.from_json(json["certificateSecurityState"]) + if json.get("certificateSecurityState", None) is not None + else None + ), + safety_tip_info=( + SafetyTipInfo.from_json(json["safetyTipInfo"]) + if json.get("safetyTipInfo", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class SecurityStateExplanation: + """ + An explanation of an factor contributing to the security state. + """ + + #: Security state representing the severity of the factor being explained. + security_state: SecurityState + + #: Title describing the type of factor. + title: str + + #: Short phrase describing the type of factor. + summary: str + + #: Full text explanation of the factor. + description: str + + #: The type of mixed content described by the explanation. + mixed_content_type: MixedContentType + + #: Page certificate. + certificate: typing.List[str] + + #: Recommendations to fix any issues. + recommendations: typing.Optional[typing.List[str]] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["securityState"] = self.security_state.to_json() + json["title"] = self.title + json["summary"] = self.summary + json["description"] = self.description + json["mixedContentType"] = self.mixed_content_type.to_json() + json["certificate"] = [i for i in self.certificate] + if self.recommendations is not None: + json["recommendations"] = [i for i in self.recommendations] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SecurityStateExplanation: + return cls( + security_state=SecurityState.from_json(json["securityState"]), + title=str(json["title"]), + summary=str(json["summary"]), + description=str(json["description"]), + mixed_content_type=MixedContentType.from_json(json["mixedContentType"]), + certificate=[str(i) for i in json["certificate"]], + recommendations=( + [str(i) for i in json["recommendations"]] + if json.get("recommendations", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class InsecureContentStatus: + """ + Information about insecure content on the page. + """ + + #: Always false. + ran_mixed_content: bool + + #: Always false. + displayed_mixed_content: bool + + #: Always false. + contained_mixed_form: bool + + #: Always false. + ran_content_with_cert_errors: bool + + #: Always false. + displayed_content_with_cert_errors: bool + + #: Always set to unknown. + ran_insecure_content_style: SecurityState + + #: Always set to unknown. + displayed_insecure_content_style: SecurityState + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["ranMixedContent"] = self.ran_mixed_content + json["displayedMixedContent"] = self.displayed_mixed_content + json["containedMixedForm"] = self.contained_mixed_form + json["ranContentWithCertErrors"] = self.ran_content_with_cert_errors + json["displayedContentWithCertErrors"] = self.displayed_content_with_cert_errors + json["ranInsecureContentStyle"] = self.ran_insecure_content_style.to_json() + json["displayedInsecureContentStyle"] = ( + self.displayed_insecure_content_style.to_json() + ) + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InsecureContentStatus: + return cls( + ran_mixed_content=bool(json["ranMixedContent"]), + displayed_mixed_content=bool(json["displayedMixedContent"]), + contained_mixed_form=bool(json["containedMixedForm"]), + ran_content_with_cert_errors=bool(json["ranContentWithCertErrors"]), + displayed_content_with_cert_errors=bool( + json["displayedContentWithCertErrors"] + ), + ran_insecure_content_style=SecurityState.from_json( + json["ranInsecureContentStyle"] + ), + displayed_insecure_content_style=SecurityState.from_json( + json["displayedInsecureContentStyle"] + ), + )
+ + + +
+[docs] +class CertificateErrorAction(enum.Enum): + """ + The action to take when a certificate error occurs. continue will continue processing the + request and cancel will cancel the request. + """ + + CONTINUE = "continue" + CANCEL = "cancel" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> CertificateErrorAction: + return cls(json)
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables tracking security state changes. + """ + cmd_dict: T_JSON_DICT = { + "method": "Security.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables tracking security state changes. + """ + cmd_dict: T_JSON_DICT = { + "method": "Security.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def set_ignore_certificate_errors( + ignore: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable/disable whether all certificate errors should be ignored. + + :param ignore: If true, all certificate errors will be ignored. + """ + params: T_JSON_DICT = dict() + params["ignore"] = ignore + cmd_dict: T_JSON_DICT = { + "method": "Security.setIgnoreCertificateErrors", + "params": params, + } + json = yield cmd_dict
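Sketch: ignore TLS certificate errors for the session, e.g. when pointing at a host with a self-signed certificate. Assumes a connected nodriver ``tab`` (``send()`` and ``get()`` are assumptions about that API); the URL is illustrative.

    from nodriver import cdp

    async def allow_self_signed(tab):
        await tab.send(cdp.security.set_ignore_certificate_errors(True))
        await tab.get("https://self-signed.badssl.com/")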
+ + + +
+[docs] +@deprecated(version="1.3") +def handle_certificate_error( + event_id: int, action: CertificateErrorAction +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Handles a certificate error that fired a certificateError event. + + .. deprecated:: 1.3 + + :param event_id: The ID of the event. + :param action: The action to take on the certificate error. + """ + params: T_JSON_DICT = dict() + params["eventId"] = event_id + params["action"] = action.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Security.handleCertificateError", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +def set_override_certificate_errors( + override: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable/disable overriding certificate errors. If enabled, all certificate error events need to + be handled by the DevTools client and should be answered with ``handleCertificateError`` commands. + + .. deprecated:: 1.3 + + :param override: If true, certificate errors will be overridden. + """ + params: T_JSON_DICT = dict() + params["override"] = override + cmd_dict: T_JSON_DICT = { + "method": "Security.setOverrideCertificateErrors", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@deprecated(version="1.3") +@event_class("Security.certificateError") +@dataclass +class CertificateError: + """ + There is a certificate error. If overriding certificate errors is enabled, then it should be + handled with the ``handleCertificateError`` command. Note: this event does not fire if the + certificate error has been allowed internally. Only one client per target should override + certificate errors at the same time. + """ + + #: The ID of the event. + event_id: int + #: The type of the error. + error_type: str + #: The url that was requested. + request_url: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CertificateError: + return cls( + event_id=int(json["eventId"]), + error_type=str(json["errorType"]), + request_url=str(json["requestURL"]), + )
+ + + +
+[docs] +@event_class("Security.visibleSecurityStateChanged") +@dataclass +class VisibleSecurityStateChanged: + """ + **EXPERIMENTAL** + + The security state of the page changed. + """ + + #: Security state information about the page. + visible_security_state: VisibleSecurityState + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> VisibleSecurityStateChanged: + return cls( + visible_security_state=VisibleSecurityState.from_json( + json["visibleSecurityState"] + ) + )
+ + + +
+[docs] +@deprecated(version="1.3") +@event_class("Security.securityStateChanged") +@dataclass +class SecurityStateChanged: + """ + The security state of the page changed. No longer being sent. + """ + + #: Security state. + security_state: SecurityState + #: True if the page was loaded over cryptographic transport such as HTTPS. + scheme_is_cryptographic: bool + #: Previously a list of explanations for the security state. Now always + #: empty. + explanations: typing.List[SecurityStateExplanation] + #: Information about insecure content on the page. + insecure_content_status: InsecureContentStatus + #: Overrides user-visible description of the state. Always omitted. + summary: typing.Optional[str] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SecurityStateChanged: + return cls( + security_state=SecurityState.from_json(json["securityState"]), + scheme_is_cryptographic=bool(json["schemeIsCryptographic"]), + explanations=[ + SecurityStateExplanation.from_json(i) for i in json["explanations"] + ], + insecure_content_status=InsecureContentStatus.from_json( + json["insecureContentStatus"] + ), + summary=( + str(json["summary"]) if json.get("summary", None) is not None else None + ), + )
+ +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/service_worker.html b/docs/_build/html/_modules/nodriver/cdp/service_worker.html
new file mode 100644
index 0000000..ac05370
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/service_worker.html
@@ -0,0 +1,794 @@
+ nodriver.cdp.service_worker - nodriver documentation

Source code for nodriver.cdp.service_worker

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: ServiceWorker (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import target
+
+
+
+[docs] +class RegistrationID(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> RegistrationID: + return cls(json) + + def __repr__(self): + return "RegistrationID({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class ServiceWorkerRegistration: + """ + ServiceWorker registration. + """ + + registration_id: RegistrationID + + scope_url: str + + is_deleted: bool + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["registrationId"] = self.registration_id.to_json() + json["scopeURL"] = self.scope_url + json["isDeleted"] = self.is_deleted + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ServiceWorkerRegistration: + return cls( + registration_id=RegistrationID.from_json(json["registrationId"]), + scope_url=str(json["scopeURL"]), + is_deleted=bool(json["isDeleted"]), + )
+ + + +
+[docs] +class ServiceWorkerVersionRunningStatus(enum.Enum): + STOPPED = "stopped" + STARTING = "starting" + RUNNING = "running" + STOPPING = "stopping" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ServiceWorkerVersionRunningStatus: + return cls(json)
+ + + +
+[docs] +class ServiceWorkerVersionStatus(enum.Enum): + NEW = "new" + INSTALLING = "installing" + INSTALLED = "installed" + ACTIVATING = "activating" + ACTIVATED = "activated" + REDUNDANT = "redundant" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ServiceWorkerVersionStatus: + return cls(json)
+ + + +
+[docs] +@dataclass +class ServiceWorkerVersion: + """ + ServiceWorker version. + """ + + version_id: str + + registration_id: RegistrationID + + script_url: str + + running_status: ServiceWorkerVersionRunningStatus + + status: ServiceWorkerVersionStatus + + #: The Last-Modified header value of the main script. + script_last_modified: typing.Optional[float] = None + + #: The time at which the response headers of the main script were received from the server. + #: For cached script it is the last time the cache entry was validated. + script_response_time: typing.Optional[float] = None + + controlled_clients: typing.Optional[typing.List[target.TargetID]] = None + + target_id: typing.Optional[target.TargetID] = None + + router_rules: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["versionId"] = self.version_id + json["registrationId"] = self.registration_id.to_json() + json["scriptURL"] = self.script_url + json["runningStatus"] = self.running_status.to_json() + json["status"] = self.status.to_json() + if self.script_last_modified is not None: + json["scriptLastModified"] = self.script_last_modified + if self.script_response_time is not None: + json["scriptResponseTime"] = self.script_response_time + if self.controlled_clients is not None: + json["controlledClients"] = [i.to_json() for i in self.controlled_clients] + if self.target_id is not None: + json["targetId"] = self.target_id.to_json() + if self.router_rules is not None: + json["routerRules"] = self.router_rules + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ServiceWorkerVersion: + return cls( + version_id=str(json["versionId"]), + registration_id=RegistrationID.from_json(json["registrationId"]), + script_url=str(json["scriptURL"]), + running_status=ServiceWorkerVersionRunningStatus.from_json( + json["runningStatus"] + ), + status=ServiceWorkerVersionStatus.from_json(json["status"]), + script_last_modified=( + float(json["scriptLastModified"]) + if json.get("scriptLastModified", None) is not None + else None + ), + script_response_time=( + float(json["scriptResponseTime"]) + if json.get("scriptResponseTime", None) is not None + else None + ), + controlled_clients=( + [target.TargetID.from_json(i) for i in json["controlledClients"]] + if json.get("controlledClients", None) is not None + else None + ), + target_id=( + target.TargetID.from_json(json["targetId"]) + if json.get("targetId", None) is not None + else None + ), + router_rules=( + str(json["routerRules"]) + if json.get("routerRules", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ServiceWorkerErrorMessage: + """ + ServiceWorker error message. + """ + + error_message: str + + registration_id: RegistrationID + + version_id: str + + source_url: str + + line_number: int + + column_number: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["errorMessage"] = self.error_message + json["registrationId"] = self.registration_id.to_json() + json["versionId"] = self.version_id + json["sourceURL"] = self.source_url + json["lineNumber"] = self.line_number + json["columnNumber"] = self.column_number + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ServiceWorkerErrorMessage: + return cls( + error_message=str(json["errorMessage"]), + registration_id=RegistrationID.from_json(json["registrationId"]), + version_id=str(json["versionId"]), + source_url=str(json["sourceURL"]), + line_number=int(json["lineNumber"]), + column_number=int(json["columnNumber"]), + )
+ + + +
+[docs] +def deliver_push_message( + origin: str, registration_id: RegistrationID, data: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param origin: + :param registration_id: + :param data: + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + params["registrationId"] = registration_id.to_json() + params["data"] = data + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.deliverPushMessage", + "params": params, + } + json = yield cmd_dict
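Every command wrapper here follows the same generator protocol: it yields exactly one JSON-ready command dict and expects the CDP response to be sent back into it. A sketch of driving one of these generators by hand; ``send_over_websocket`` is a hypothetical transport callable, and in practice nodriver's connection layer performs this exchange for you:

    from nodriver.cdp import service_worker

    def drive(cmd_gen, send_over_websocket):
        # cmd_gen yields the {"method": ..., "params": ...} payload exactly once.
        request = next(cmd_gen)
        response = send_over_websocket(request)   # hypothetical: ship it over the CDP websocket
        try:
            cmd_gen.send(response)                 # feed the result back into the generator
        except StopIteration as exc:
            return exc.value                       # commands with a return value expose it here

    cmd = service_worker.deliver_push_message(
        origin="https://example.com",              # placeholder origin
        registration_id=service_worker.RegistrationID("1"),
        data="hello",
    )
    # drive(cmd, my_transport)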
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def dispatch_sync_event( + origin: str, registration_id: RegistrationID, tag: str, last_chance: bool +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param origin: + :param registration_id: + :param tag: + :param last_chance: + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + params["registrationId"] = registration_id.to_json() + params["tag"] = tag + params["lastChance"] = last_chance + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.dispatchSyncEvent", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def dispatch_periodic_sync_event( + origin: str, registration_id: RegistrationID, tag: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param origin: + :param registration_id: + :param tag: + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + params["registrationId"] = registration_id.to_json() + params["tag"] = tag + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.dispatchPeriodicSyncEvent", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.enable", + } + json = yield cmd_dict
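The ServiceWorker domain reports nothing until it has been enabled, so ``enable()`` is normally the first command sent. A usage sketch, assuming nodriver's usual ``start()``, ``get()`` and ``tab.send(...)`` helpers:

    import nodriver as uc
    from nodriver import cdp

    async def main():
        browser = await uc.start()
        tab = await browser.get("https://developer.mozilla.org")   # placeholder page
        # Enable the (experimental) ServiceWorker domain before issuing
        # other commands such as stop_all_workers() or inspect_worker().
        await tab.send(cdp.service_worker.enable())
        await tab.send(cdp.service_worker.stop_all_workers())

    if __name__ == "__main__":
        uc.loop().run_until_complete(main())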
+ + + +
+[docs] +def inspect_worker(version_id: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param version_id: + """ + params: T_JSON_DICT = dict() + params["versionId"] = version_id + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.inspectWorker", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_force_update_on_page_load( + force_update_on_page_load: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param force_update_on_page_load: + """ + params: T_JSON_DICT = dict() + params["forceUpdateOnPageLoad"] = force_update_on_page_load + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.setForceUpdateOnPageLoad", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def skip_waiting(scope_url: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param scope_url: + """ + params: T_JSON_DICT = dict() + params["scopeURL"] = scope_url + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.skipWaiting", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def start_worker(scope_url: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param scope_url: + """ + params: T_JSON_DICT = dict() + params["scopeURL"] = scope_url + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.startWorker", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_all_workers() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.stopAllWorkers", + } + json = yield cmd_dict
+ + + +
+[docs] +def stop_worker(version_id: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param version_id: + """ + params: T_JSON_DICT = dict() + params["versionId"] = version_id + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.stopWorker", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def unregister(scope_url: str) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param scope_url: + """ + params: T_JSON_DICT = dict() + params["scopeURL"] = scope_url + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.unregister", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def update_registration( + scope_url: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + :param scope_url: + """ + params: T_JSON_DICT = dict() + params["scopeURL"] = scope_url + cmd_dict: T_JSON_DICT = { + "method": "ServiceWorker.updateRegistration", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("ServiceWorker.workerErrorReported") +@dataclass +class WorkerErrorReported: + error_message: ServiceWorkerErrorMessage + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WorkerErrorReported: + return cls( + error_message=ServiceWorkerErrorMessage.from_json(json["errorMessage"]) + )
+ + + +
+[docs] +@event_class("ServiceWorker.workerRegistrationUpdated") +@dataclass +class WorkerRegistrationUpdated: + registrations: typing.List[ServiceWorkerRegistration] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WorkerRegistrationUpdated: + return cls( + registrations=[ + ServiceWorkerRegistration.from_json(i) for i in json["registrations"] + ] + )
+ + + +
+[docs] +@event_class("ServiceWorker.workerVersionUpdated") +@dataclass +class WorkerVersionUpdated: + versions: typing.List[ServiceWorkerVersion] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> WorkerVersionUpdated: + return cls( + versions=[ServiceWorkerVersion.from_json(i) for i in json["versions"]] + )
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/storage.html b/docs/_build/html/_modules/nodriver/cdp/storage.html
new file mode 100644
index 0000000..8441dd6
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/storage.html
@@ -0,0 +1,2702 @@
+ [page head and Furo theme chrome for "nodriver.cdp.storage - nodriver documentation" omitted]

Source code for nodriver.cdp.storage

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Storage (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import browser
+from . import network
+from . import page
+
+
+
+[docs] +class SerializedStorageKey(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> SerializedStorageKey: + return cls(json) + + def __repr__(self): + return "SerializedStorageKey({})".format(super().__repr__())
+ + + +
+[docs] +class StorageType(enum.Enum): + """ + Enum of possible storage types. + """ + + APPCACHE = "appcache" + COOKIES = "cookies" + FILE_SYSTEMS = "file_systems" + INDEXEDDB = "indexeddb" + LOCAL_STORAGE = "local_storage" + SHADER_CACHE = "shader_cache" + WEBSQL = "websql" + SERVICE_WORKERS = "service_workers" + CACHE_STORAGE = "cache_storage" + INTEREST_GROUPS = "interest_groups" + SHARED_STORAGE = "shared_storage" + STORAGE_BUCKETS = "storage_buckets" + ALL_ = "all" + OTHER = "other" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> StorageType: + return cls(json)
+ + + +
+[docs] +@dataclass +class UsageForType: + """ + Usage for a storage type. + """ + + #: Name of storage type. + storage_type: StorageType + + #: Storage usage (bytes). + usage: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["storageType"] = self.storage_type.to_json() + json["usage"] = self.usage + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> UsageForType: + return cls( + storage_type=StorageType.from_json(json["storageType"]), + usage=float(json["usage"]), + )
+ + + +
+[docs] +@dataclass +class TrustTokens: + """ + Pair of issuer origin and number of available (signed, but not used) Trust + Tokens from that issuer. + """ + + issuer_origin: str + + count: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["issuerOrigin"] = self.issuer_origin + json["count"] = self.count + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TrustTokens: + return cls( + issuer_origin=str(json["issuerOrigin"]), + count=float(json["count"]), + )
+ + + +
+[docs] +class InterestGroupAuctionId(str): + """ + Protected audience interest group auction identifier. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> InterestGroupAuctionId: + return cls(json) + + def __repr__(self): + return "InterestGroupAuctionId({})".format(super().__repr__())
+ + + +
+[docs] +class InterestGroupAccessType(enum.Enum): + """ + Enum of interest group access types. + """ + + JOIN = "join" + LEAVE = "leave" + UPDATE = "update" + LOADED = "loaded" + BID = "bid" + WIN = "win" + ADDITIONAL_BID = "additionalBid" + ADDITIONAL_BID_WIN = "additionalBidWin" + TOP_LEVEL_BID = "topLevelBid" + TOP_LEVEL_ADDITIONAL_BID = "topLevelAdditionalBid" + CLEAR = "clear" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> InterestGroupAccessType: + return cls(json)
+ + + +
+[docs] +class InterestGroupAuctionEventType(enum.Enum): + """ + Enum of auction events. + """ + + STARTED = "started" + CONFIG_RESOLVED = "configResolved" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> InterestGroupAuctionEventType: + return cls(json)
+ + + +
+[docs] +class InterestGroupAuctionFetchType(enum.Enum): + """ + Enum of network fetches auctions can do. + """ + + BIDDER_JS = "bidderJs" + BIDDER_WASM = "bidderWasm" + SELLER_JS = "sellerJs" + BIDDER_TRUSTED_SIGNALS = "bidderTrustedSignals" + SELLER_TRUSTED_SIGNALS = "sellerTrustedSignals" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> InterestGroupAuctionFetchType: + return cls(json)
+ + + +
+[docs] +class SharedStorageAccessType(enum.Enum): + """ + Enum of shared storage access types. + """ + + DOCUMENT_ADD_MODULE = "documentAddModule" + DOCUMENT_SELECT_URL = "documentSelectURL" + DOCUMENT_RUN = "documentRun" + DOCUMENT_SET = "documentSet" + DOCUMENT_APPEND = "documentAppend" + DOCUMENT_DELETE = "documentDelete" + DOCUMENT_CLEAR = "documentClear" + DOCUMENT_GET = "documentGet" + WORKLET_SET = "workletSet" + WORKLET_APPEND = "workletAppend" + WORKLET_DELETE = "workletDelete" + WORKLET_CLEAR = "workletClear" + WORKLET_GET = "workletGet" + WORKLET_KEYS = "workletKeys" + WORKLET_ENTRIES = "workletEntries" + WORKLET_LENGTH = "workletLength" + WORKLET_REMAINING_BUDGET = "workletRemainingBudget" + HEADER_SET = "headerSet" + HEADER_APPEND = "headerAppend" + HEADER_DELETE = "headerDelete" + HEADER_CLEAR = "headerClear" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SharedStorageAccessType: + return cls(json)
+ + + +
+[docs] +@dataclass +class SharedStorageEntry: + """ + Struct for a single key-value pair in an origin's shared storage. + """ + + key: str + + value: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["key"] = self.key + json["value"] = self.value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SharedStorageEntry: + return cls( + key=str(json["key"]), + value=str(json["value"]), + )
+ + + +
+[docs] +@dataclass +class SharedStorageMetadata: + """ + Details for an origin's shared storage. + """ + + #: Time when the origin's shared storage was last created. + creation_time: network.TimeSinceEpoch + + #: Number of key-value pairs stored in origin's shared storage. + length: int + + #: Current amount of bits of entropy remaining in the navigation budget. + remaining_budget: float + + #: Total number of bytes stored as key-value pairs in origin's shared + #: storage. + bytes_used: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["creationTime"] = self.creation_time.to_json() + json["length"] = self.length + json["remainingBudget"] = self.remaining_budget + json["bytesUsed"] = self.bytes_used + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SharedStorageMetadata: + return cls( + creation_time=network.TimeSinceEpoch.from_json(json["creationTime"]), + length=int(json["length"]), + remaining_budget=float(json["remainingBudget"]), + bytes_used=int(json["bytesUsed"]), + )
+ + + +
+[docs] +@dataclass +class SharedStorageReportingMetadata: + """ + Pair of reporting metadata details for a candidate URL for ``selectURL()``. + """ + + event_type: str + + reporting_url: str + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["eventType"] = self.event_type + json["reportingUrl"] = self.reporting_url + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SharedStorageReportingMetadata: + return cls( + event_type=str(json["eventType"]), + reporting_url=str(json["reportingUrl"]), + )
+ + + +
+[docs] +@dataclass +class SharedStorageUrlWithMetadata: + """ + Bundles a candidate URL with its reporting metadata. + """ + + #: Spec of candidate URL. + url: str + + #: Any associated reporting metadata. + reporting_metadata: typing.List[SharedStorageReportingMetadata] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["url"] = self.url + json["reportingMetadata"] = [i.to_json() for i in self.reporting_metadata] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SharedStorageUrlWithMetadata: + return cls( + url=str(json["url"]), + reporting_metadata=[ + SharedStorageReportingMetadata.from_json(i) + for i in json["reportingMetadata"] + ], + )
+ + + +
+[docs] +@dataclass +class SharedStorageAccessParams: + """ + Bundles the parameters for shared storage access events whose + presence/absence can vary according to SharedStorageAccessType. + """ + + #: Spec of the module script URL. + #: Present only for SharedStorageAccessType.documentAddModule. + script_source_url: typing.Optional[str] = None + + #: Name of the registered operation to be run. + #: Present only for SharedStorageAccessType.documentRun and + #: SharedStorageAccessType.documentSelectURL. + operation_name: typing.Optional[str] = None + + #: The operation's serialized data in bytes (converted to a string). + #: Present only for SharedStorageAccessType.documentRun and + #: SharedStorageAccessType.documentSelectURL. + serialized_data: typing.Optional[str] = None + + #: Array of candidate URLs' specs, along with any associated metadata. + #: Present only for SharedStorageAccessType.documentSelectURL. + urls_with_metadata: typing.Optional[typing.List[SharedStorageUrlWithMetadata]] = ( + None + ) + + #: Key for a specific entry in an origin's shared storage. + #: Present only for SharedStorageAccessType.documentSet, + #: SharedStorageAccessType.documentAppend, + #: SharedStorageAccessType.documentDelete, + #: SharedStorageAccessType.workletSet, + #: SharedStorageAccessType.workletAppend, + #: SharedStorageAccessType.workletDelete, + #: SharedStorageAccessType.workletGet, + #: SharedStorageAccessType.headerSet, + #: SharedStorageAccessType.headerAppend, and + #: SharedStorageAccessType.headerDelete. + key: typing.Optional[str] = None + + #: Value for a specific entry in an origin's shared storage. + #: Present only for SharedStorageAccessType.documentSet, + #: SharedStorageAccessType.documentAppend, + #: SharedStorageAccessType.workletSet, + #: SharedStorageAccessType.workletAppend, + #: SharedStorageAccessType.headerSet, and + #: SharedStorageAccessType.headerAppend. + value: typing.Optional[str] = None + + #: Whether or not to set an entry for a key if that key is already present. + #: Present only for SharedStorageAccessType.documentSet, + #: SharedStorageAccessType.workletSet, and + #: SharedStorageAccessType.headerSet. 
+ ignore_if_present: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.script_source_url is not None: + json["scriptSourceUrl"] = self.script_source_url + if self.operation_name is not None: + json["operationName"] = self.operation_name + if self.serialized_data is not None: + json["serializedData"] = self.serialized_data + if self.urls_with_metadata is not None: + json["urlsWithMetadata"] = [i.to_json() for i in self.urls_with_metadata] + if self.key is not None: + json["key"] = self.key + if self.value is not None: + json["value"] = self.value + if self.ignore_if_present is not None: + json["ignoreIfPresent"] = self.ignore_if_present + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SharedStorageAccessParams: + return cls( + script_source_url=( + str(json["scriptSourceUrl"]) + if json.get("scriptSourceUrl", None) is not None + else None + ), + operation_name=( + str(json["operationName"]) + if json.get("operationName", None) is not None + else None + ), + serialized_data=( + str(json["serializedData"]) + if json.get("serializedData", None) is not None + else None + ), + urls_with_metadata=( + [ + SharedStorageUrlWithMetadata.from_json(i) + for i in json["urlsWithMetadata"] + ] + if json.get("urlsWithMetadata", None) is not None + else None + ), + key=str(json["key"]) if json.get("key", None) is not None else None, + value=str(json["value"]) if json.get("value", None) is not None else None, + ignore_if_present=( + bool(json["ignoreIfPresent"]) + if json.get("ignoreIfPresent", None) is not None + else None + ), + )
+ + + +
+[docs] +class StorageBucketsDurability(enum.Enum): + RELAXED = "relaxed" + STRICT = "strict" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> StorageBucketsDurability: + return cls(json)
+ + + +
+[docs] +@dataclass +class StorageBucket: + storage_key: SerializedStorageKey + + #: If not specified, it is the default bucket of the storageKey. + name: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["storageKey"] = self.storage_key.to_json() + if self.name is not None: + json["name"] = self.name + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StorageBucket: + return cls( + storage_key=SerializedStorageKey.from_json(json["storageKey"]), + name=str(json["name"]) if json.get("name", None) is not None else None, + )
+ + + +
+[docs] +@dataclass +class StorageBucketInfo: + bucket: StorageBucket + + id_: str + + expiration: network.TimeSinceEpoch + + #: Storage quota (bytes). + quota: float + + persistent: bool + + durability: StorageBucketsDurability + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["bucket"] = self.bucket.to_json() + json["id"] = self.id_ + json["expiration"] = self.expiration.to_json() + json["quota"] = self.quota + json["persistent"] = self.persistent + json["durability"] = self.durability.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StorageBucketInfo: + return cls( + bucket=StorageBucket.from_json(json["bucket"]), + id_=str(json["id"]), + expiration=network.TimeSinceEpoch.from_json(json["expiration"]), + quota=float(json["quota"]), + persistent=bool(json["persistent"]), + durability=StorageBucketsDurability.from_json(json["durability"]), + )
+ + + +
+[docs] +class AttributionReportingSourceType(enum.Enum): + NAVIGATION = "navigation" + EVENT = "event" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AttributionReportingSourceType: + return cls(json)
+ + + +
+[docs] +class UnsignedInt64AsBase10(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> UnsignedInt64AsBase10: + return cls(json) + + def __repr__(self): + return "UnsignedInt64AsBase10({})".format(super().__repr__())
+ + + +
+[docs] +class UnsignedInt128AsBase16(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> UnsignedInt128AsBase16: + return cls(json) + + def __repr__(self): + return "UnsignedInt128AsBase16({})".format(super().__repr__())
+ + + +
+[docs] +class SignedInt64AsBase10(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> SignedInt64AsBase10: + return cls(json) + + def __repr__(self): + return "SignedInt64AsBase10({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class AttributionReportingFilterDataEntry: + key: str + + values: typing.List[str] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["key"] = self.key + json["values"] = [i for i in self.values] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingFilterDataEntry: + return cls( + key=str(json["key"]), + values=[str(i) for i in json["values"]], + )
+ + + +
+[docs] +@dataclass +class AttributionReportingFilterConfig: + filter_values: typing.List[AttributionReportingFilterDataEntry] + + #: duration in seconds + lookback_window: typing.Optional[int] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["filterValues"] = [i.to_json() for i in self.filter_values] + if self.lookback_window is not None: + json["lookbackWindow"] = self.lookback_window + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingFilterConfig: + return cls( + filter_values=[ + AttributionReportingFilterDataEntry.from_json(i) + for i in json["filterValues"] + ], + lookback_window=( + int(json["lookbackWindow"]) + if json.get("lookbackWindow", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class AttributionReportingFilterPair: + filters: typing.List[AttributionReportingFilterConfig] + + not_filters: typing.List[AttributionReportingFilterConfig] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["filters"] = [i.to_json() for i in self.filters] + json["notFilters"] = [i.to_json() for i in self.not_filters] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingFilterPair: + return cls( + filters=[ + AttributionReportingFilterConfig.from_json(i) for i in json["filters"] + ], + not_filters=[ + AttributionReportingFilterConfig.from_json(i) + for i in json["notFilters"] + ], + )
+ + + +
+[docs] +@dataclass +class AttributionReportingAggregationKeysEntry: + key: str + + value: UnsignedInt128AsBase16 + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["key"] = self.key + json["value"] = self.value.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingAggregationKeysEntry: + return cls( + key=str(json["key"]), + value=UnsignedInt128AsBase16.from_json(json["value"]), + )
+ + + +
+[docs] +@dataclass +class AttributionReportingEventReportWindows: + #: duration in seconds + start: int + + #: duration in seconds + ends: typing.List[int] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["start"] = self.start + json["ends"] = [i for i in self.ends] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingEventReportWindows: + return cls( + start=int(json["start"]), + ends=[int(i) for i in json["ends"]], + )
+ + + +
+[docs] +@dataclass +class AttributionReportingTriggerSpec: + #: number instead of integer because not all uint32 can be represented by + #: int + trigger_data: typing.List[float] + + event_report_windows: AttributionReportingEventReportWindows + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["triggerData"] = [i for i in self.trigger_data] + json["eventReportWindows"] = self.event_report_windows.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingTriggerSpec: + return cls( + trigger_data=[float(i) for i in json["triggerData"]], + event_report_windows=AttributionReportingEventReportWindows.from_json( + json["eventReportWindows"] + ), + )
+ + + +
+[docs] +class AttributionReportingTriggerDataMatching(enum.Enum): + EXACT = "exact" + MODULUS = "modulus" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AttributionReportingTriggerDataMatching: + return cls(json)
+ + + +
+[docs] +@dataclass +class AttributionReportingSourceRegistration: + time: network.TimeSinceEpoch + + #: duration in seconds + expiry: int + + trigger_specs: typing.List[AttributionReportingTriggerSpec] + + #: duration in seconds + aggregatable_report_window: int + + type_: AttributionReportingSourceType + + source_origin: str + + reporting_origin: str + + destination_sites: typing.List[str] + + event_id: UnsignedInt64AsBase10 + + priority: SignedInt64AsBase10 + + filter_data: typing.List[AttributionReportingFilterDataEntry] + + aggregation_keys: typing.List[AttributionReportingAggregationKeysEntry] + + trigger_data_matching: AttributionReportingTriggerDataMatching + + debug_key: typing.Optional[UnsignedInt64AsBase10] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["time"] = self.time.to_json() + json["expiry"] = self.expiry + json["triggerSpecs"] = [i.to_json() for i in self.trigger_specs] + json["aggregatableReportWindow"] = self.aggregatable_report_window + json["type"] = self.type_.to_json() + json["sourceOrigin"] = self.source_origin + json["reportingOrigin"] = self.reporting_origin + json["destinationSites"] = [i for i in self.destination_sites] + json["eventId"] = self.event_id.to_json() + json["priority"] = self.priority.to_json() + json["filterData"] = [i.to_json() for i in self.filter_data] + json["aggregationKeys"] = [i.to_json() for i in self.aggregation_keys] + json["triggerDataMatching"] = self.trigger_data_matching.to_json() + if self.debug_key is not None: + json["debugKey"] = self.debug_key.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingSourceRegistration: + return cls( + time=network.TimeSinceEpoch.from_json(json["time"]), + expiry=int(json["expiry"]), + trigger_specs=[ + AttributionReportingTriggerSpec.from_json(i) + for i in json["triggerSpecs"] + ], + aggregatable_report_window=int(json["aggregatableReportWindow"]), + type_=AttributionReportingSourceType.from_json(json["type"]), + source_origin=str(json["sourceOrigin"]), + reporting_origin=str(json["reportingOrigin"]), + destination_sites=[str(i) for i in json["destinationSites"]], + event_id=UnsignedInt64AsBase10.from_json(json["eventId"]), + priority=SignedInt64AsBase10.from_json(json["priority"]), + filter_data=[ + AttributionReportingFilterDataEntry.from_json(i) + for i in json["filterData"] + ], + aggregation_keys=[ + AttributionReportingAggregationKeysEntry.from_json(i) + for i in json["aggregationKeys"] + ], + trigger_data_matching=AttributionReportingTriggerDataMatching.from_json( + json["triggerDataMatching"] + ), + debug_key=( + UnsignedInt64AsBase10.from_json(json["debugKey"]) + if json.get("debugKey", None) is not None + else None + ), + )
+ + + +
+[docs] +class AttributionReportingSourceRegistrationResult(enum.Enum): + SUCCESS = "success" + INTERNAL_ERROR = "internalError" + INSUFFICIENT_SOURCE_CAPACITY = "insufficientSourceCapacity" + INSUFFICIENT_UNIQUE_DESTINATION_CAPACITY = "insufficientUniqueDestinationCapacity" + EXCESSIVE_REPORTING_ORIGINS = "excessiveReportingOrigins" + PROHIBITED_BY_BROWSER_POLICY = "prohibitedByBrowserPolicy" + SUCCESS_NOISED = "successNoised" + DESTINATION_REPORTING_LIMIT_REACHED = "destinationReportingLimitReached" + DESTINATION_GLOBAL_LIMIT_REACHED = "destinationGlobalLimitReached" + DESTINATION_BOTH_LIMITS_REACHED = "destinationBothLimitsReached" + REPORTING_ORIGINS_PER_SITE_LIMIT_REACHED = "reportingOriginsPerSiteLimitReached" + EXCEEDS_MAX_CHANNEL_CAPACITY = "exceedsMaxChannelCapacity" + EXCEEDS_MAX_TRIGGER_STATE_CARDINALITY = "exceedsMaxTriggerStateCardinality" + DESTINATION_PER_DAY_REPORTING_LIMIT_REACHED = ( + "destinationPerDayReportingLimitReached" + ) + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AttributionReportingSourceRegistrationResult: + return cls(json)
+ + + +
+[docs] +class AttributionReportingSourceRegistrationTimeConfig(enum.Enum): + INCLUDE = "include" + EXCLUDE = "exclude" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AttributionReportingSourceRegistrationTimeConfig: + return cls(json)
+ + + +
+[docs] +@dataclass +class AttributionReportingAggregatableValueDictEntry: + key: str + + #: number instead of integer because not all uint32 can be represented by + #: int + value: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["key"] = self.key + json["value"] = self.value + return json + + @classmethod + def from_json( + cls, json: T_JSON_DICT + ) -> AttributionReportingAggregatableValueDictEntry: + return cls( + key=str(json["key"]), + value=float(json["value"]), + )
+ + + +
+[docs] +@dataclass +class AttributionReportingAggregatableValueEntry: + values: typing.List[AttributionReportingAggregatableValueDictEntry] + + filters: AttributionReportingFilterPair + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["values"] = [i.to_json() for i in self.values] + json["filters"] = self.filters.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingAggregatableValueEntry: + return cls( + values=[ + AttributionReportingAggregatableValueDictEntry.from_json(i) + for i in json["values"] + ], + filters=AttributionReportingFilterPair.from_json(json["filters"]), + )
+ + + +
+[docs] +@dataclass +class AttributionReportingEventTriggerData: + data: UnsignedInt64AsBase10 + + priority: SignedInt64AsBase10 + + filters: AttributionReportingFilterPair + + dedup_key: typing.Optional[UnsignedInt64AsBase10] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["data"] = self.data.to_json() + json["priority"] = self.priority.to_json() + json["filters"] = self.filters.to_json() + if self.dedup_key is not None: + json["dedupKey"] = self.dedup_key.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingEventTriggerData: + return cls( + data=UnsignedInt64AsBase10.from_json(json["data"]), + priority=SignedInt64AsBase10.from_json(json["priority"]), + filters=AttributionReportingFilterPair.from_json(json["filters"]), + dedup_key=( + UnsignedInt64AsBase10.from_json(json["dedupKey"]) + if json.get("dedupKey", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class AttributionReportingAggregatableTriggerData: + key_piece: UnsignedInt128AsBase16 + + source_keys: typing.List[str] + + filters: AttributionReportingFilterPair + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["keyPiece"] = self.key_piece.to_json() + json["sourceKeys"] = [i for i in self.source_keys] + json["filters"] = self.filters.to_json() + return json + + @classmethod + def from_json( + cls, json: T_JSON_DICT + ) -> AttributionReportingAggregatableTriggerData: + return cls( + key_piece=UnsignedInt128AsBase16.from_json(json["keyPiece"]), + source_keys=[str(i) for i in json["sourceKeys"]], + filters=AttributionReportingFilterPair.from_json(json["filters"]), + )
+ + + +
+[docs] +@dataclass +class AttributionReportingAggregatableDedupKey: + filters: AttributionReportingFilterPair + + dedup_key: typing.Optional[UnsignedInt64AsBase10] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["filters"] = self.filters.to_json() + if self.dedup_key is not None: + json["dedupKey"] = self.dedup_key.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingAggregatableDedupKey: + return cls( + filters=AttributionReportingFilterPair.from_json(json["filters"]), + dedup_key=( + UnsignedInt64AsBase10.from_json(json["dedupKey"]) + if json.get("dedupKey", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class AttributionReportingTriggerRegistration: + filters: AttributionReportingFilterPair + + aggregatable_dedup_keys: typing.List[AttributionReportingAggregatableDedupKey] + + event_trigger_data: typing.List[AttributionReportingEventTriggerData] + + aggregatable_trigger_data: typing.List[AttributionReportingAggregatableTriggerData] + + aggregatable_values: typing.List[AttributionReportingAggregatableValueEntry] + + debug_reporting: bool + + source_registration_time_config: AttributionReportingSourceRegistrationTimeConfig + + debug_key: typing.Optional[UnsignedInt64AsBase10] = None + + aggregation_coordinator_origin: typing.Optional[str] = None + + trigger_context_id: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["filters"] = self.filters.to_json() + json["aggregatableDedupKeys"] = [ + i.to_json() for i in self.aggregatable_dedup_keys + ] + json["eventTriggerData"] = [i.to_json() for i in self.event_trigger_data] + json["aggregatableTriggerData"] = [ + i.to_json() for i in self.aggregatable_trigger_data + ] + json["aggregatableValues"] = [i.to_json() for i in self.aggregatable_values] + json["debugReporting"] = self.debug_reporting + json["sourceRegistrationTimeConfig"] = ( + self.source_registration_time_config.to_json() + ) + if self.debug_key is not None: + json["debugKey"] = self.debug_key.to_json() + if self.aggregation_coordinator_origin is not None: + json["aggregationCoordinatorOrigin"] = self.aggregation_coordinator_origin + if self.trigger_context_id is not None: + json["triggerContextId"] = self.trigger_context_id + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingTriggerRegistration: + return cls( + filters=AttributionReportingFilterPair.from_json(json["filters"]), + aggregatable_dedup_keys=[ + AttributionReportingAggregatableDedupKey.from_json(i) + for i in json["aggregatableDedupKeys"] + ], + event_trigger_data=[ + AttributionReportingEventTriggerData.from_json(i) + for i in json["eventTriggerData"] + ], + aggregatable_trigger_data=[ + AttributionReportingAggregatableTriggerData.from_json(i) + for i in json["aggregatableTriggerData"] + ], + aggregatable_values=[ + AttributionReportingAggregatableValueEntry.from_json(i) + for i in json["aggregatableValues"] + ], + debug_reporting=bool(json["debugReporting"]), + source_registration_time_config=AttributionReportingSourceRegistrationTimeConfig.from_json( + json["sourceRegistrationTimeConfig"] + ), + debug_key=( + UnsignedInt64AsBase10.from_json(json["debugKey"]) + if json.get("debugKey", None) is not None + else None + ), + aggregation_coordinator_origin=( + str(json["aggregationCoordinatorOrigin"]) + if json.get("aggregationCoordinatorOrigin", None) is not None + else None + ), + trigger_context_id=( + str(json["triggerContextId"]) + if json.get("triggerContextId", None) is not None + else None + ), + )
+ + + +
+[docs] +class AttributionReportingEventLevelResult(enum.Enum): + SUCCESS = "success" + SUCCESS_DROPPED_LOWER_PRIORITY = "successDroppedLowerPriority" + INTERNAL_ERROR = "internalError" + NO_CAPACITY_FOR_ATTRIBUTION_DESTINATION = "noCapacityForAttributionDestination" + NO_MATCHING_SOURCES = "noMatchingSources" + DEDUPLICATED = "deduplicated" + EXCESSIVE_ATTRIBUTIONS = "excessiveAttributions" + PRIORITY_TOO_LOW = "priorityTooLow" + NEVER_ATTRIBUTED_SOURCE = "neverAttributedSource" + EXCESSIVE_REPORTING_ORIGINS = "excessiveReportingOrigins" + NO_MATCHING_SOURCE_FILTER_DATA = "noMatchingSourceFilterData" + PROHIBITED_BY_BROWSER_POLICY = "prohibitedByBrowserPolicy" + NO_MATCHING_CONFIGURATIONS = "noMatchingConfigurations" + EXCESSIVE_REPORTS = "excessiveReports" + FALSELY_ATTRIBUTED_SOURCE = "falselyAttributedSource" + REPORT_WINDOW_PASSED = "reportWindowPassed" + NOT_REGISTERED = "notRegistered" + REPORT_WINDOW_NOT_STARTED = "reportWindowNotStarted" + NO_MATCHING_TRIGGER_DATA = "noMatchingTriggerData" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AttributionReportingEventLevelResult: + return cls(json)
+ + + +
+[docs] +class AttributionReportingAggregatableResult(enum.Enum): + SUCCESS = "success" + INTERNAL_ERROR = "internalError" + NO_CAPACITY_FOR_ATTRIBUTION_DESTINATION = "noCapacityForAttributionDestination" + NO_MATCHING_SOURCES = "noMatchingSources" + EXCESSIVE_ATTRIBUTIONS = "excessiveAttributions" + EXCESSIVE_REPORTING_ORIGINS = "excessiveReportingOrigins" + NO_HISTOGRAMS = "noHistograms" + INSUFFICIENT_BUDGET = "insufficientBudget" + NO_MATCHING_SOURCE_FILTER_DATA = "noMatchingSourceFilterData" + NOT_REGISTERED = "notRegistered" + PROHIBITED_BY_BROWSER_POLICY = "prohibitedByBrowserPolicy" + DEDUPLICATED = "deduplicated" + REPORT_WINDOW_PASSED = "reportWindowPassed" + EXCESSIVE_REPORTS = "excessiveReports" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AttributionReportingAggregatableResult: + return cls(json)
+ + + +
+[docs] +@dataclass +class RelatedWebsiteSet: + """ + A single Related Website Set object. + """ + + #: The primary site of this set, along with the ccTLDs if there is any. + primary_sites: typing.List[str] + + #: The associated sites of this set, along with the ccTLDs if there is any. + associated_sites: typing.List[str] + + #: The service sites of this set, along with the ccTLDs if there is any. + service_sites: typing.List[str] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["primarySites"] = [i for i in self.primary_sites] + json["associatedSites"] = [i for i in self.associated_sites] + json["serviceSites"] = [i for i in self.service_sites] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RelatedWebsiteSet: + return cls( + primary_sites=[str(i) for i in json["primarySites"]], + associated_sites=[str(i) for i in json["associatedSites"]], + service_sites=[str(i) for i in json["serviceSites"]], + )
+ + + +
+[docs] +def get_storage_key_for_frame( + frame_id: page.FrameId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SerializedStorageKey]: + """ + Returns a storage key given a frame id. + + :param frame_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["frameId"] = frame_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Storage.getStorageKeyForFrame", + "params": params, + } + json = yield cmd_dict + return SerializedStorageKey.from_json(json["storageKey"])
+ + + +
+[docs] +def clear_data_for_origin( + origin: str, storage_types: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears storage for origin. + + :param origin: Security origin. + :param storage_types: Comma separated list of StorageType to clear. + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + params["storageTypes"] = storage_types + cmd_dict: T_JSON_DICT = { + "method": "Storage.clearDataForOrigin", + "params": params, + } + json = yield cmd_dict
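Note that ``storage_types`` is a plain comma-separated string rather than a list, so the ``StorageType`` values are typically joined by hand. A small sketch, assuming a ``tab`` obtained from nodriver as in the earlier examples:

    from nodriver import cdp

    # Build the comma-separated list from the enum instead of hard-coding strings.
    types = ",".join(
        t.value
        for t in (
            cdp.storage.StorageType.COOKIES,
            cdp.storage.StorageType.LOCAL_STORAGE,
            cdp.storage.StorageType.INDEXEDDB,
        )
    )
    # await tab.send(cdp.storage.clear_data_for_origin("https://example.com", types))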
+ + + +
+[docs] +def clear_data_for_storage_key( + storage_key: str, storage_types: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears storage for storage key. + + :param storage_key: Storage key. + :param storage_types: Comma separated list of StorageType to clear. + """ + params: T_JSON_DICT = dict() + params["storageKey"] = storage_key + params["storageTypes"] = storage_types + cmd_dict: T_JSON_DICT = { + "method": "Storage.clearDataForStorageKey", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_cookies( + browser_context_id: typing.Optional[browser.BrowserContextID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[network.Cookie]]: + """ + Returns all browser cookies. + + :param browser_context_id: *(Optional)* Browser context to use when called on the browser endpoint. + :returns: Array of cookie objects. + """ + params: T_JSON_DICT = dict() + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Storage.getCookies", + "params": params, + } + json = yield cmd_dict + return [network.Cookie.from_json(i) for i in json["cookies"]]
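A short sketch of reading cookies through this wrapper, again assuming nodriver's ``tab.send(...)`` helper; the returned ``network.Cookie`` objects come back fully parsed:

    import nodriver as uc
    from nodriver import cdp

    async def dump_cookies():
        browser = await uc.start()
        tab = await browser.get("https://example.com")   # placeholder page
        cookies = await tab.send(cdp.storage.get_cookies())
        for c in cookies:
            print(c.name, c.domain, c.value)

    uc.loop().run_until_complete(dump_cookies())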
+ + + +
+[docs] +def set_cookies( + cookies: typing.List[network.CookieParam], + browser_context_id: typing.Optional[browser.BrowserContextID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets given cookies. + + :param cookies: Cookies to be set. + :param browser_context_id: *(Optional)* Browser context to use when called on the browser endpoint. + """ + params: T_JSON_DICT = dict() + params["cookies"] = [i.to_json() for i in cookies] + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Storage.setCookies", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_cookies( + browser_context_id: typing.Optional[browser.BrowserContextID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears cookies. + + :param browser_context_id: *(Optional)* Browser context to use when called on the browser endpoint. + """ + params: T_JSON_DICT = dict() + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Storage.clearCookies", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_usage_and_quota( + origin: str, +) -> typing.Generator[ + T_JSON_DICT, + T_JSON_DICT, + typing.Tuple[float, float, bool, typing.List[UsageForType]], +]: + """ + Returns usage and quota in bytes. + + :param origin: Security origin. + :returns: A tuple with the following items: + + 0. **usage** - Storage usage (bytes). + 1. **quota** - Storage quota (bytes). + 2. **overrideActive** - Whether or not the origin has an active storage quota override + 3. **usageBreakdown** - Storage usage per type (bytes). + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.getUsageAndQuota", + "params": params, + } + json = yield cmd_dict + return ( + float(json["usage"]), + float(json["quota"]), + bool(json["overrideActive"]), + [UsageForType.from_json(i) for i in json["usageBreakdown"]], + )
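Commands with several return values surface them as a tuple in the order listed in the docstring, so callers usually unpack them directly. A sketch, assuming a ``tab`` from nodriver as above:

    from nodriver import cdp

    async def report_quota(tab, origin: str = "https://example.com"):   # placeholder origin
        usage, quota, override_active, breakdown = await tab.send(
            cdp.storage.get_usage_and_quota(origin)
        )
        print(f"{usage / 1e6:.1f} MB used of {quota / 1e6:.1f} MB (override={override_active})")
        for item in breakdown:
            # UsageForType pairs a StorageType with its byte count.
            print(f"  {item.storage_type.value}: {item.usage:.0f} bytes")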
+ + + +
+[docs] +def override_quota_for_origin( + origin: str, quota_size: typing.Optional[float] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Override quota for the specified origin + + **EXPERIMENTAL** + + :param origin: Security origin. + :param quota_size: *(Optional)* The quota size (in bytes) to override the original quota with. If this is called multiple times, the overridden quota will be equal to the quotaSize provided in the final call. If this is called without specifying a quotaSize, the quota will be reset to the default value for the specified origin. If this is called multiple times with different origins, the override will be maintained for each origin until it is disabled (called without a quotaSize). + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + if quota_size is not None: + params["quotaSize"] = quota_size + cmd_dict: T_JSON_DICT = { + "method": "Storage.overrideQuotaForOrigin", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def track_cache_storage_for_origin( + origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Registers origin to be notified when an update occurs to its cache storage list. + + :param origin: Security origin. + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.trackCacheStorageForOrigin", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def track_cache_storage_for_storage_key( + storage_key: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Registers storage key to be notified when an update occurs to its cache storage list. + + :param storage_key: Storage key. + """ + params: T_JSON_DICT = dict() + params["storageKey"] = storage_key + cmd_dict: T_JSON_DICT = { + "method": "Storage.trackCacheStorageForStorageKey", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def track_indexed_db_for_origin( + origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Registers origin to be notified when an update occurs to its IndexedDB. + + :param origin: Security origin. + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.trackIndexedDBForOrigin", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def track_indexed_db_for_storage_key( + storage_key: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Registers storage key to be notified when an update occurs to its IndexedDB. + + :param storage_key: Storage key. + """ + params: T_JSON_DICT = dict() + params["storageKey"] = storage_key + cmd_dict: T_JSON_DICT = { + "method": "Storage.trackIndexedDBForStorageKey", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def untrack_cache_storage_for_origin( + origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Unregisters origin from receiving notifications for cache storage. + + :param origin: Security origin. + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.untrackCacheStorageForOrigin", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def untrack_cache_storage_for_storage_key( + storage_key: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Unregisters storage key from receiving notifications for cache storage. + + :param storage_key: Storage key. + """ + params: T_JSON_DICT = dict() + params["storageKey"] = storage_key + cmd_dict: T_JSON_DICT = { + "method": "Storage.untrackCacheStorageForStorageKey", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def untrack_indexed_db_for_origin( + origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Unregisters origin from receiving notifications for IndexedDB. + + :param origin: Security origin. + """ + params: T_JSON_DICT = dict() + params["origin"] = origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.untrackIndexedDBForOrigin", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def untrack_indexed_db_for_storage_key( + storage_key: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Unregisters storage key from receiving notifications for IndexedDB. + + :param storage_key: Storage key. + """ + params: T_JSON_DICT = dict() + params["storageKey"] = storage_key + cmd_dict: T_JSON_DICT = { + "method": "Storage.untrackIndexedDBForStorageKey", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_trust_tokens() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[TrustTokens]] +): + """ + Returns the number of stored Trust Tokens per issuer for the + current browsing context. + + **EXPERIMENTAL** + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "Storage.getTrustTokens", + } + json = yield cmd_dict + return [TrustTokens.from_json(i) for i in json["tokens"]]
+ + + +
+[docs] +def clear_trust_tokens( + issuer_origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, bool]: + """ + Removes all Trust Tokens issued by the provided issuerOrigin. + Leaves other stored data, including the issuer's Redemption Records, intact. + + **EXPERIMENTAL** + + :param issuer_origin: + :returns: True if any tokens were deleted, false otherwise. + """ + params: T_JSON_DICT = dict() + params["issuerOrigin"] = issuer_origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.clearTrustTokens", + "params": params, + } + json = yield cmd_dict + return bool(json["didDeleteTokens"])
+ + + +
+[docs] +def get_interest_group_details( + owner_origin: str, name: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, dict]: + """ + Gets details for a named interest group. + + **EXPERIMENTAL** + + :param owner_origin: + :param name: + :returns: This largely corresponds to: https://wicg.github.io/turtledove/#dictdef-generatebidinterestgroup but has absolute expirationTime instead of relative lifetimeMs and also adds joiningOrigin. + """ + params: T_JSON_DICT = dict() + params["ownerOrigin"] = owner_origin + params["name"] = name + cmd_dict: T_JSON_DICT = { + "method": "Storage.getInterestGroupDetails", + "params": params, + } + json = yield cmd_dict + return dict(json["details"])
+ + + +
+[docs] +def set_interest_group_tracking( + enable: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables/Disables issuing of interestGroupAccessed events. + + **EXPERIMENTAL** + + :param enable: + """ + params: T_JSON_DICT = dict() + params["enable"] = enable + cmd_dict: T_JSON_DICT = { + "method": "Storage.setInterestGroupTracking", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_interest_group_auction_tracking( + enable: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables/Disables issuing of interestGroupAuctionEventOccurred and + interestGroupAuctionNetworkRequestCreated. + + **EXPERIMENTAL** + + :param enable: + """ + params: T_JSON_DICT = dict() + params["enable"] = enable + cmd_dict: T_JSON_DICT = { + "method": "Storage.setInterestGroupAuctionTracking", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_shared_storage_metadata( + owner_origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SharedStorageMetadata]: + """ + Gets metadata for an origin's shared storage. + + **EXPERIMENTAL** + + :param owner_origin: + :returns: + """ + params: T_JSON_DICT = dict() + params["ownerOrigin"] = owner_origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.getSharedStorageMetadata", + "params": params, + } + json = yield cmd_dict + return SharedStorageMetadata.from_json(json["metadata"])
+ + + +
+[docs] +def get_shared_storage_entries( + owner_origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[SharedStorageEntry]]: + """ + Gets the entries in a given origin's shared storage. + + **EXPERIMENTAL** + + :param owner_origin: + :returns: + """ + params: T_JSON_DICT = dict() + params["ownerOrigin"] = owner_origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.getSharedStorageEntries", + "params": params, + } + json = yield cmd_dict + return [SharedStorageEntry.from_json(i) for i in json["entries"]]
+ + + +
+[docs] +def set_shared_storage_entry( + owner_origin: str, + key: str, + value: str, + ignore_if_present: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets entry with ``key`` and ``value`` for a given origin's shared storage. + + **EXPERIMENTAL** + + :param owner_origin: + :param key: + :param value: + :param ignore_if_present: *(Optional)* If ``ignoreIfPresent`` is included and true, then only sets the entry if ``key`` doesn't already exist. + """ + params: T_JSON_DICT = dict() + params["ownerOrigin"] = owner_origin + params["key"] = key + params["value"] = value + if ignore_if_present is not None: + params["ignoreIfPresent"] = ignore_if_present + cmd_dict: T_JSON_DICT = { + "method": "Storage.setSharedStorageEntry", + "params": params, + } + json = yield cmd_dict
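The shared-storage helpers pair naturally: write an entry, then read the origin's entries back with ``get_shared_storage_entries()``. A sketch under the same nodriver assumptions as the earlier examples:

    from nodriver import cdp

    async def demo_shared_storage(tab, origin: str = "https://example.com"):   # placeholder origin
        # Only write the entry if the key is not already present.
        await tab.send(
            cdp.storage.set_shared_storage_entry(
                owner_origin=origin, key="greeting", value="hello", ignore_if_present=True
            )
        )
        entries = await tab.send(cdp.storage.get_shared_storage_entries(origin))
        for entry in entries:
            print(entry.key, "=", entry.value)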
+ + + +
+[docs] +def delete_shared_storage_entry( + owner_origin: str, key: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Deletes entry for ``key`` (if it exists) for a given origin's shared storage. + + **EXPERIMENTAL** + + :param owner_origin: + :param key: + """ + params: T_JSON_DICT = dict() + params["ownerOrigin"] = owner_origin + params["key"] = key + cmd_dict: T_JSON_DICT = { + "method": "Storage.deleteSharedStorageEntry", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_shared_storage_entries( + owner_origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears all entries for a given origin's shared storage. + + **EXPERIMENTAL** + + :param owner_origin: + """ + params: T_JSON_DICT = dict() + params["ownerOrigin"] = owner_origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.clearSharedStorageEntries", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def reset_shared_storage_budget( + owner_origin: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Resets the budget for ``ownerOrigin`` by clearing all budget withdrawals. + + **EXPERIMENTAL** + + :param owner_origin: + """ + params: T_JSON_DICT = dict() + params["ownerOrigin"] = owner_origin + cmd_dict: T_JSON_DICT = { + "method": "Storage.resetSharedStorageBudget", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_shared_storage_tracking( + enable: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables/disables issuing of sharedStorageAccessed events. + + **EXPERIMENTAL** + + :param enable: + """ + params: T_JSON_DICT = dict() + params["enable"] = enable + cmd_dict: T_JSON_DICT = { + "method": "Storage.setSharedStorageTracking", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_storage_bucket_tracking( + storage_key: str, enable: bool +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Set tracking for a storage key's buckets. + + **EXPERIMENTAL** + + :param storage_key: + :param enable: + """ + params: T_JSON_DICT = dict() + params["storageKey"] = storage_key + params["enable"] = enable + cmd_dict: T_JSON_DICT = { + "method": "Storage.setStorageBucketTracking", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def delete_storage_bucket( + bucket: StorageBucket, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Deletes the Storage Bucket with the given storage key and bucket name. + + **EXPERIMENTAL** + + :param bucket: + """ + params: T_JSON_DICT = dict() + params["bucket"] = bucket.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Storage.deleteStorageBucket", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def run_bounce_tracking_mitigations() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]] +): + """ + Deletes state for sites identified as potential bounce trackers, immediately. + + **EXPERIMENTAL** + + :returns: + """ + cmd_dict: T_JSON_DICT = { + "method": "Storage.runBounceTrackingMitigations", + } + json = yield cmd_dict + return [str(i) for i in json["deletedSites"]]
+ + + +
+[docs] +def set_attribution_reporting_local_testing_mode( + enabled: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + https://wicg.github.io/attribution-reporting-api/ + + **EXPERIMENTAL** + + :param enabled: If enabled, noise is suppressed and reports are sent immediately. + """ + params: T_JSON_DICT = dict() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "Storage.setAttributionReportingLocalTestingMode", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_attribution_reporting_tracking( + enable: bool, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables/disables issuing of Attribution Reporting events. + + **EXPERIMENTAL** + + :param enable: + """ + params: T_JSON_DICT = dict() + params["enable"] = enable + cmd_dict: T_JSON_DICT = { + "method": "Storage.setAttributionReportingTracking", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def send_pending_attribution_reports() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, int] +): + """ + Sends all pending Attribution Reports immediately, regardless of their + scheduled report time. + + **EXPERIMENTAL** + + :returns: The number of reports that were sent. + """ + cmd_dict: T_JSON_DICT = { + "method": "Storage.sendPendingAttributionReports", + } + json = yield cmd_dict + return int(json["numSent"])
+ + + + + + + +
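The two attribution-reporting toggles and ``send_pending_attribution_reports`` above combine naturally for local testing. A sketch under the same assumptions as the previous example (a connected nodriver Tab ``tab``); the flow is inferred from the command descriptions, not taken from an official recipe:

    from nodriver import cdp

    async def flush_attribution_reports(tab) -> int:
        # Suppress noise and make reports eligible for immediate delivery ...
        await tab.send(cdp.storage.set_attribution_reporting_local_testing_mode(enabled=True))
        # ... then force out whatever is still pending.
        sent = await tab.send(cdp.storage.send_pending_attribution_reports())
        print(f"{sent} attribution report(s) sent")
        return sent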
+[docs] +@event_class("Storage.cacheStorageContentUpdated") +@dataclass +class CacheStorageContentUpdated: + """ + A cache's contents have been modified. + """ + + #: Origin to update. + origin: str + #: Storage key to update. + storage_key: str + #: Storage bucket to update. + bucket_id: str + #: Name of cache in origin. + cache_name: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CacheStorageContentUpdated: + return cls( + origin=str(json["origin"]), + storage_key=str(json["storageKey"]), + bucket_id=str(json["bucketId"]), + cache_name=str(json["cacheName"]), + )
+ + + +
+[docs] +@event_class("Storage.cacheStorageListUpdated") +@dataclass +class CacheStorageListUpdated: + """ + A cache has been added/deleted. + """ + + #: Origin to update. + origin: str + #: Storage key to update. + storage_key: str + #: Storage bucket to update. + bucket_id: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CacheStorageListUpdated: + return cls( + origin=str(json["origin"]), + storage_key=str(json["storageKey"]), + bucket_id=str(json["bucketId"]), + )
+ + + +
+[docs] +@event_class("Storage.indexedDBContentUpdated") +@dataclass +class IndexedDBContentUpdated: + """ + The origin's IndexedDB object store has been modified. + """ + + #: Origin to update. + origin: str + #: Storage key to update. + storage_key: str + #: Storage bucket to update. + bucket_id: str + #: Database to update. + database_name: str + #: ObjectStore to update. + object_store_name: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> IndexedDBContentUpdated: + return cls( + origin=str(json["origin"]), + storage_key=str(json["storageKey"]), + bucket_id=str(json["bucketId"]), + database_name=str(json["databaseName"]), + object_store_name=str(json["objectStoreName"]), + )
+ + + +
+[docs] +@event_class("Storage.indexedDBListUpdated") +@dataclass +class IndexedDBListUpdated: + """ + The origin's IndexedDB database list has been modified. + """ + + #: Origin to update. + origin: str + #: Storage key to update. + storage_key: str + #: Storage bucket to update. + bucket_id: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> IndexedDBListUpdated: + return cls( + origin=str(json["origin"]), + storage_key=str(json["storageKey"]), + bucket_id=str(json["bucketId"]), + )
+ + + +
+[docs] +@event_class("Storage.interestGroupAccessed") +@dataclass +class InterestGroupAccessed: + """ + One of the interest groups was accessed. Note that these events are global + to all targets sharing an interest group store. + """ + + access_time: network.TimeSinceEpoch + type_: InterestGroupAccessType + owner_origin: str + name: str + #: For topLevelBid/topLevelAdditionalBid, and when appropriate, + #: win and additionalBidWin + component_seller_origin: typing.Optional[str] + #: For bid or somethingBid event, if done locally and not on a server. + bid: typing.Optional[float] + bid_currency: typing.Optional[str] + #: For non-global events --- links to interestGroupAuctionEvent + unique_auction_id: typing.Optional[InterestGroupAuctionId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InterestGroupAccessed: + return cls( + access_time=network.TimeSinceEpoch.from_json(json["accessTime"]), + type_=InterestGroupAccessType.from_json(json["type"]), + owner_origin=str(json["ownerOrigin"]), + name=str(json["name"]), + component_seller_origin=( + str(json["componentSellerOrigin"]) + if json.get("componentSellerOrigin", None) is not None + else None + ), + bid=float(json["bid"]) if json.get("bid", None) is not None else None, + bid_currency=( + str(json["bidCurrency"]) + if json.get("bidCurrency", None) is not None + else None + ), + unique_auction_id=( + InterestGroupAuctionId.from_json(json["uniqueAuctionId"]) + if json.get("uniqueAuctionId", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Storage.interestGroupAuctionEventOccurred") +@dataclass +class InterestGroupAuctionEventOccurred: + """ + An auction involving interest groups is taking place. These events are + target-specific. + """ + + event_time: network.TimeSinceEpoch + type_: InterestGroupAuctionEventType + unique_auction_id: InterestGroupAuctionId + #: Set for child auctions. + parent_auction_id: typing.Optional[InterestGroupAuctionId] + #: Set for started and configResolved + auction_config: typing.Optional[dict] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InterestGroupAuctionEventOccurred: + return cls( + event_time=network.TimeSinceEpoch.from_json(json["eventTime"]), + type_=InterestGroupAuctionEventType.from_json(json["type"]), + unique_auction_id=InterestGroupAuctionId.from_json(json["uniqueAuctionId"]), + parent_auction_id=( + InterestGroupAuctionId.from_json(json["parentAuctionId"]) + if json.get("parentAuctionId", None) is not None + else None + ), + auction_config=( + dict(json["auctionConfig"]) + if json.get("auctionConfig", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Storage.interestGroupAuctionNetworkRequestCreated") +@dataclass +class InterestGroupAuctionNetworkRequestCreated: + """ + Specifies which auctions a particular network fetch may be related to, and + in what role. Note that it is not ordered with respect to + Network.requestWillBeSent (but will happen before loadingFinished + loadingFailed). + """ + + type_: InterestGroupAuctionFetchType + request_id: network.RequestId + #: This is the set of the auctions using the worklet that issued this + #: request. In the case of trusted signals, it's possible that only some of + #: them actually care about the keys being queried. + auctions: typing.List[InterestGroupAuctionId] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> InterestGroupAuctionNetworkRequestCreated: + return cls( + type_=InterestGroupAuctionFetchType.from_json(json["type"]), + request_id=network.RequestId.from_json(json["requestId"]), + auctions=[InterestGroupAuctionId.from_json(i) for i in json["auctions"]], + )
+ + + +
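The auction events above are only emitted once ``set_interest_group_auction_tracking`` (defined near the top of this module) is enabled. A sketch, assuming a nodriver Tab whose ``add_handler()`` registers a callback per event dataclass and whose ``send()`` drives the command generators:

    from nodriver import cdp

    async def trace_interest_group_auctions(tab):
        def on_auction_event(ev: cdp.storage.InterestGroupAuctionEventOccurred):
            print("auction", ev.unique_auction_id, ev.type_)

        def on_auction_fetch(ev: cdp.storage.InterestGroupAuctionNetworkRequestCreated):
            # One fetch may serve several auctions, hence the list.
            print("fetch", ev.type_, "for", len(ev.auctions), "auction(s)")

        tab.add_handler(cdp.storage.InterestGroupAuctionEventOccurred, on_auction_event)
        tab.add_handler(cdp.storage.InterestGroupAuctionNetworkRequestCreated, on_auction_fetch)
        await tab.send(cdp.storage.set_interest_group_auction_tracking(enable=True))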
+[docs] +@event_class("Storage.sharedStorageAccessed") +@dataclass +class SharedStorageAccessed: + """ + Shared storage was accessed by the associated page. + The following parameters are included in all events. + """ + + #: Time of the access. + access_time: network.TimeSinceEpoch + #: Enum value indicating the Shared Storage API method invoked. + type_: SharedStorageAccessType + #: DevTools Frame Token for the primary frame tree's root. + main_frame_id: page.FrameId + #: Serialized origin for the context that invoked the Shared Storage API. + owner_origin: str + #: The sub-parameters wrapped by ``params`` are all optional and their + #: presence/absence depends on ``type``. + params: SharedStorageAccessParams + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> SharedStorageAccessed: + return cls( + access_time=network.TimeSinceEpoch.from_json(json["accessTime"]), + type_=SharedStorageAccessType.from_json(json["type"]), + main_frame_id=page.FrameId.from_json(json["mainFrameId"]), + owner_origin=str(json["ownerOrigin"]), + params=SharedStorageAccessParams.from_json(json["params"]), + )
+ + + +
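To actually receive ``SharedStorageAccessed``, tracking has to be switched on with ``set_shared_storage_tracking`` from earlier in this module. A hedged sketch with the same Tab assumptions as above:

    from nodriver import cdp

    async def watch_shared_storage(tab):
        def on_access(event: cdp.storage.SharedStorageAccessed):
            # Each event reports the invoking origin and which API method was used.
            print(event.owner_origin, event.type_)

        tab.add_handler(cdp.storage.SharedStorageAccessed, on_access)
        await tab.send(cdp.storage.set_shared_storage_tracking(enable=True))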
+[docs] +@event_class("Storage.storageBucketCreatedOrUpdated") +@dataclass +class StorageBucketCreatedOrUpdated: + bucket_info: StorageBucketInfo + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StorageBucketCreatedOrUpdated: + return cls(bucket_info=StorageBucketInfo.from_json(json["bucketInfo"]))
+ + + +
+[docs] +@event_class("Storage.storageBucketDeleted") +@dataclass +class StorageBucketDeleted: + bucket_id: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> StorageBucketDeleted: + return cls(bucket_id=str(json["bucketId"]))
+ + + +
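``set_storage_bucket_tracking`` earlier in this module is what makes the two bucket events above fire. A sketch with an illustrative storage key, under the same Tab assumptions as before:

    from nodriver import cdp

    async def watch_buckets(tab, storage_key: str = "https://example.com/"):
        def on_changed(ev: cdp.storage.StorageBucketCreatedOrUpdated):
            print("bucket created/updated:", ev.bucket_info)

        def on_deleted(ev: cdp.storage.StorageBucketDeleted):
            print("bucket deleted:", ev.bucket_id)

        tab.add_handler(cdp.storage.StorageBucketCreatedOrUpdated, on_changed)
        tab.add_handler(cdp.storage.StorageBucketDeleted, on_deleted)
        await tab.send(cdp.storage.set_storage_bucket_tracking(storage_key, enable=True))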
+[docs] +@event_class("Storage.attributionReportingSourceRegistered") +@dataclass +class AttributionReportingSourceRegistered: + """ + **EXPERIMENTAL** + + + """ + + registration: AttributionReportingSourceRegistration + result: AttributionReportingSourceRegistrationResult + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingSourceRegistered: + return cls( + registration=AttributionReportingSourceRegistration.from_json( + json["registration"] + ), + result=AttributionReportingSourceRegistrationResult.from_json( + json["result"] + ), + )
+ + + +
+[docs] +@event_class("Storage.attributionReportingTriggerRegistered") +@dataclass +class AttributionReportingTriggerRegistered: + """ + **EXPERIMENTAL** + + + """ + + registration: AttributionReportingTriggerRegistration + event_level: AttributionReportingEventLevelResult + aggregatable: AttributionReportingAggregatableResult + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttributionReportingTriggerRegistered: + return cls( + registration=AttributionReportingTriggerRegistration.from_json( + json["registration"] + ), + event_level=AttributionReportingEventLevelResult.from_json( + json["eventLevel"] + ), + aggregatable=AttributionReportingAggregatableResult.from_json( + json["aggregatable"] + ), + )
+ +
+
+
+
+ + +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/system_info.html b/docs/_build/html/_modules/nodriver/cdp/system_info.html
new file mode 100644
index 0000000..dedc61f
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/system_info.html
@@ -0,0 +1,744 @@
+ nodriver.cdp.system_info - nodriver documentation

Source code for nodriver.cdp.system_info

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: SystemInfo (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +@dataclass +class GPUDevice: + """ + Describes a single graphics processor (GPU). + """ + + #: PCI ID of the GPU vendor, if available; 0 otherwise. + vendor_id: float + + #: PCI ID of the GPU device, if available; 0 otherwise. + device_id: float + + #: String description of the GPU vendor, if the PCI ID is not available. + vendor_string: str + + #: String description of the GPU device, if the PCI ID is not available. + device_string: str + + #: String description of the GPU driver vendor. + driver_vendor: str + + #: String description of the GPU driver version. + driver_version: str + + #: Sub sys ID of the GPU, only available on Windows. + sub_sys_id: typing.Optional[float] = None + + #: Revision of the GPU, only available on Windows. + revision: typing.Optional[float] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["vendorId"] = self.vendor_id + json["deviceId"] = self.device_id + json["vendorString"] = self.vendor_string + json["deviceString"] = self.device_string + json["driverVendor"] = self.driver_vendor + json["driverVersion"] = self.driver_version + if self.sub_sys_id is not None: + json["subSysId"] = self.sub_sys_id + if self.revision is not None: + json["revision"] = self.revision + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> GPUDevice: + return cls( + vendor_id=float(json["vendorId"]), + device_id=float(json["deviceId"]), + vendor_string=str(json["vendorString"]), + device_string=str(json["deviceString"]), + driver_vendor=str(json["driverVendor"]), + driver_version=str(json["driverVersion"]), + sub_sys_id=( + float(json["subSysId"]) + if json.get("subSysId", None) is not None + else None + ), + revision=( + float(json["revision"]) + if json.get("revision", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class Size: + """ + Describes the width and height dimensions of an entity. + """ + + #: Width in pixels. + width: int + + #: Height in pixels. + height: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["width"] = self.width + json["height"] = self.height + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Size: + return cls( + width=int(json["width"]), + height=int(json["height"]), + )
+ + + +
+[docs] +@dataclass +class VideoDecodeAcceleratorCapability: + """ + Describes a supported video decoding profile with its associated minimum and + maximum resolutions. + """ + + #: Video codec profile that is supported, e.g. VP9 Profile 2. + profile: str + + #: Maximum video dimensions in pixels supported for this ``profile``. + max_resolution: Size + + #: Minimum video dimensions in pixels supported for this ``profile``. + min_resolution: Size + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["profile"] = self.profile + json["maxResolution"] = self.max_resolution.to_json() + json["minResolution"] = self.min_resolution.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> VideoDecodeAcceleratorCapability: + return cls( + profile=str(json["profile"]), + max_resolution=Size.from_json(json["maxResolution"]), + min_resolution=Size.from_json(json["minResolution"]), + )
+ + + +
+[docs] +@dataclass +class VideoEncodeAcceleratorCapability: + """ + Describes a supported video encoding profile with its associated maximum + resolution and maximum framerate. + """ + + #: Video codec profile that is supported, e.g H264 Main. + profile: str + + #: Maximum video dimensions in pixels supported for this ``profile``. + max_resolution: Size + + #: Maximum encoding framerate in frames per second supported for this + #: ``profile``, as fraction's numerator and denominator, e.g. 24/1 fps, + #: 24000/1001 fps, etc. + max_framerate_numerator: int + + max_framerate_denominator: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["profile"] = self.profile + json["maxResolution"] = self.max_resolution.to_json() + json["maxFramerateNumerator"] = self.max_framerate_numerator + json["maxFramerateDenominator"] = self.max_framerate_denominator + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> VideoEncodeAcceleratorCapability: + return cls( + profile=str(json["profile"]), + max_resolution=Size.from_json(json["maxResolution"]), + max_framerate_numerator=int(json["maxFramerateNumerator"]), + max_framerate_denominator=int(json["maxFramerateDenominator"]), + )
+ + + +
+[docs] +class SubsamplingFormat(enum.Enum): + """ + YUV subsampling type of the pixels of a given image. + """ + + YUV420 = "yuv420" + YUV422 = "yuv422" + YUV444 = "yuv444" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> SubsamplingFormat: + return cls(json)
+ + + +
+[docs] +class ImageType(enum.Enum): + """ + Image format of a given image. + """ + + JPEG = "jpeg" + WEBP = "webp" + UNKNOWN = "unknown" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ImageType: + return cls(json)
+ + + +
+[docs] +@dataclass +class ImageDecodeAcceleratorCapability: + """ + Describes a supported image decoding profile with its associated minimum and + maximum resolutions and subsampling. + """ + + #: Image coded, e.g. Jpeg. + image_type: ImageType + + #: Maximum supported dimensions of the image in pixels. + max_dimensions: Size + + #: Minimum supported dimensions of the image in pixels. + min_dimensions: Size + + #: Optional array of supported subsampling formats, e.g. 4:2:0, if known. + subsamplings: typing.List[SubsamplingFormat] + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["imageType"] = self.image_type.to_json() + json["maxDimensions"] = self.max_dimensions.to_json() + json["minDimensions"] = self.min_dimensions.to_json() + json["subsamplings"] = [i.to_json() for i in self.subsamplings] + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ImageDecodeAcceleratorCapability: + return cls( + image_type=ImageType.from_json(json["imageType"]), + max_dimensions=Size.from_json(json["maxDimensions"]), + min_dimensions=Size.from_json(json["minDimensions"]), + subsamplings=[SubsamplingFormat.from_json(i) for i in json["subsamplings"]], + )
+ + + +
+[docs] +@dataclass +class GPUInfo: + """ + Provides information about the GPU(s) on the system. + """ + + #: The graphics devices on the system. Element 0 is the primary GPU. + devices: typing.List[GPUDevice] + + #: An optional array of GPU driver bug workarounds. + driver_bug_workarounds: typing.List[str] + + #: Supported accelerated video decoding capabilities. + video_decoding: typing.List[VideoDecodeAcceleratorCapability] + + #: Supported accelerated video encoding capabilities. + video_encoding: typing.List[VideoEncodeAcceleratorCapability] + + #: Supported accelerated image decoding capabilities. + image_decoding: typing.List[ImageDecodeAcceleratorCapability] + + #: An optional dictionary of additional GPU related attributes. + aux_attributes: typing.Optional[dict] = None + + #: An optional dictionary of graphics features and their status. + feature_status: typing.Optional[dict] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["devices"] = [i.to_json() for i in self.devices] + json["driverBugWorkarounds"] = [i for i in self.driver_bug_workarounds] + json["videoDecoding"] = [i.to_json() for i in self.video_decoding] + json["videoEncoding"] = [i.to_json() for i in self.video_encoding] + json["imageDecoding"] = [i.to_json() for i in self.image_decoding] + if self.aux_attributes is not None: + json["auxAttributes"] = self.aux_attributes + if self.feature_status is not None: + json["featureStatus"] = self.feature_status + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> GPUInfo: + return cls( + devices=[GPUDevice.from_json(i) for i in json["devices"]], + driver_bug_workarounds=[str(i) for i in json["driverBugWorkarounds"]], + video_decoding=[ + VideoDecodeAcceleratorCapability.from_json(i) + for i in json["videoDecoding"] + ], + video_encoding=[ + VideoEncodeAcceleratorCapability.from_json(i) + for i in json["videoEncoding"] + ], + image_decoding=[ + ImageDecodeAcceleratorCapability.from_json(i) + for i in json["imageDecoding"] + ], + aux_attributes=( + dict(json["auxAttributes"]) + if json.get("auxAttributes", None) is not None + else None + ), + feature_status=( + dict(json["featureStatus"]) + if json.get("featureStatus", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class ProcessInfo: + """ + Represents process info. + """ + + #: Specifies process type. + type_: str + + #: Specifies process id. + id_: int + + #: Specifies cumulative CPU usage in seconds across all threads of the + #: process since the process start. + cpu_time: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["type"] = self.type_ + json["id"] = self.id_ + json["cpuTime"] = self.cpu_time + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ProcessInfo: + return cls( + type_=str(json["type"]), + id_=int(json["id"]), + cpu_time=float(json["cpuTime"]), + )
+ + + +
+[docs] +def get_info() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[GPUInfo, str, str, str]] +): + """ + Returns information about the system. + + :returns: A tuple with the following items: + + 0. **gpu** - Information about the GPUs on the system. + 1. **modelName** - A platform-dependent description of the model of the machine. On Mac OS, this is, for example, 'MacBookPro'. Will be the empty string if not supported. + 2. **modelVersion** - A platform-dependent description of the version of the machine. On Mac OS, this is, for example, '10.1'. Will be the empty string if not supported. + 3. **commandLine** - The command line string used to launch the browser. Will be the empty string if not supported. + """ + cmd_dict: T_JSON_DICT = { + "method": "SystemInfo.getInfo", + } + json = yield cmd_dict + return ( + GPUInfo.from_json(json["gpu"]), + str(json["modelName"]), + str(json["modelVersion"]), + str(json["commandLine"]), + )
+ + + +
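``get_info()`` returns a plain tuple rather than a wrapper object, so it is usually unpacked on the spot. A short sketch, assuming a connected nodriver Tab drives the generator via ``send()``:

    from nodriver import cdp

    async def print_system_info(tab):
        gpu, model_name, model_version, command_line = await tab.send(cdp.system_info.get_info())
        print("model:", model_name or "n/a", model_version)
        print("launched with:", command_line)
        for device in gpu.devices:
            # Element 0 is the primary GPU, per the GPUInfo docstring above.
            print("gpu:", device.vendor_string, device.device_string, device.driver_version)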
+[docs] +def get_feature_state( + feature_state: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, bool]: + """ + Returns information about the feature state. + + :param feature_state: + :returns: + """ + params: T_JSON_DICT = dict() + params["featureState"] = feature_state + cmd_dict: T_JSON_DICT = { + "method": "SystemInfo.getFeatureState", + "params": params, + } + json = yield cmd_dict + return bool(json["featureEnabled"])
+ + + +
+[docs] +def get_process_info() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[ProcessInfo]] +): + """ + Returns information about all running processes. + + :returns: An array of process info blocks. + """ + cmd_dict: T_JSON_DICT = { + "method": "SystemInfo.getProcessInfo", + } + json = yield cmd_dict + return [ProcessInfo.from_json(i) for i in json["processInfo"]]
+ +
+
+
+
+ + +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/target.html b/docs/_build/html/_modules/nodriver/cdp/target.html
new file mode 100644
index 0000000..c6e6734
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/target.html
@@ -0,0 +1,1140 @@
+ nodriver.cdp.target - nodriver documentation

Source code for nodriver.cdp.target

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Target
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import browser
+from . import page
+from deprecated.sphinx import deprecated  # type: ignore
+
+
+
+[docs] +class TargetID(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> TargetID: + return cls(json) + + def __repr__(self): + return "TargetID({})".format(super().__repr__())
+ + + +
+[docs] +class SessionID(str): + """ + Unique identifier of attached debugging session. + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> SessionID: + return cls(json) + + def __repr__(self): + return "SessionID({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class TargetInfo: + target_id: TargetID + + #: List of types: https://source.chromium.org/chromium/chromium/src/+/main:content/browser/devtools/devtools_agent_host_impl.cc?ss=chromium&q=f:devtools%20-f:out%20%22::kTypeTab%5B%5D%22 + type_: str + + title: str + + url: str + + #: Whether the target has an attached client. + attached: bool + + #: Whether the target has access to the originating window. + can_access_opener: bool + + #: Opener target Id + opener_id: typing.Optional[TargetID] = None + + #: Frame id of originating window (is only set if target has an opener). + opener_frame_id: typing.Optional[page.FrameId] = None + + browser_context_id: typing.Optional[browser.BrowserContextID] = None + + #: Provides additional details for specific target types. For example, for + #: the type of "page", this may be set to "portal" or "prerender". + subtype: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["targetId"] = self.target_id.to_json() + json["type"] = self.type_ + json["title"] = self.title + json["url"] = self.url + json["attached"] = self.attached + json["canAccessOpener"] = self.can_access_opener + if self.opener_id is not None: + json["openerId"] = self.opener_id.to_json() + if self.opener_frame_id is not None: + json["openerFrameId"] = self.opener_frame_id.to_json() + if self.browser_context_id is not None: + json["browserContextId"] = self.browser_context_id.to_json() + if self.subtype is not None: + json["subtype"] = self.subtype + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TargetInfo: + return cls( + target_id=TargetID.from_json(json["targetId"]), + type_=str(json["type"]), + title=str(json["title"]), + url=str(json["url"]), + attached=bool(json["attached"]), + can_access_opener=bool(json["canAccessOpener"]), + opener_id=( + TargetID.from_json(json["openerId"]) + if json.get("openerId", None) is not None + else None + ), + opener_frame_id=( + page.FrameId.from_json(json["openerFrameId"]) + if json.get("openerFrameId", None) is not None + else None + ), + browser_context_id=( + browser.BrowserContextID.from_json(json["browserContextId"]) + if json.get("browserContextId", None) is not None + else None + ), + subtype=( + str(json["subtype"]) if json.get("subtype", None) is not None else None + ), + )
+ + + +
+[docs] +@dataclass +class FilterEntry: + """ + A filter used by target query/discovery/auto-attach operations. + """ + + #: If set, causes exclusion of matching targets from the list. + exclude: typing.Optional[bool] = None + + #: If not present, matches any type. + type_: typing.Optional[str] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.exclude is not None: + json["exclude"] = self.exclude + if self.type_ is not None: + json["type"] = self.type_ + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> FilterEntry: + return cls( + exclude=( + bool(json["exclude"]) if json.get("exclude", None) is not None else None + ), + type_=str(json["type"]) if json.get("type", None) is not None else None, + )
+ + + +
+[docs] +class TargetFilter(list): + """ + The entries in TargetFilter are matched sequentially against targets and + the first entry that matches determines if the target is included or not, + depending on the value of ``exclude`` field in the entry. + If filter is not specified, the one assumed is + [{type: "browser", exclude: true}, {type: "tab", exclude: true}, {}] + (i.e. include everything but ``browser`` and ``tab``). + """ + + def to_json(self) -> typing.List[FilterEntry]: + return self + + @classmethod + def from_json(cls, json: typing.List[FilterEntry]) -> TargetFilter: + return cls(json) + + def __repr__(self): + return "TargetFilter({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class RemoteLocation: + host: str + + port: int + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["host"] = self.host + json["port"] = self.port + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> RemoteLocation: + return cls( + host=str(json["host"]), + port=int(json["port"]), + )
+ + + +
+[docs] +def activate_target( + target_id: TargetID, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Activates (focuses) the target. + + :param target_id: + """ + params: T_JSON_DICT = dict() + params["targetId"] = target_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.activateTarget", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def attach_to_target( + target_id: TargetID, flatten: typing.Optional[bool] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SessionID]: + """ + Attaches to the target with given id. + + :param target_id: + :param flatten: *(Optional)* Enables "flat" access to the session via specifying sessionId attribute in the commands. We plan to make this the default, deprecate non-flattened mode, and eventually retire it. See crbug.com/991325. + :returns: Id assigned to the session. + """ + params: T_JSON_DICT = dict() + params["targetId"] = target_id.to_json() + if flatten is not None: + params["flatten"] = flatten + cmd_dict: T_JSON_DICT = { + "method": "Target.attachToTarget", + "params": params, + } + json = yield cmd_dict + return SessionID.from_json(json["sessionId"])
+ + + +
+[docs] +def attach_to_browser_target() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, SessionID]: + """ + Attaches to the browser target, only uses flat sessionId mode. + + **EXPERIMENTAL** + + :returns: Id assigned to the session. + """ + cmd_dict: T_JSON_DICT = { + "method": "Target.attachToBrowserTarget", + } + json = yield cmd_dict + return SessionID.from_json(json["sessionId"])
+ + + +
+[docs] +def close_target( + target_id: TargetID, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, bool]: + """ + Closes the target. If the target is a page, that page gets closed too. + + :param target_id: + :returns: Always set to true. If an error occurs, the response indicates protocol error. + """ + params: T_JSON_DICT = dict() + params["targetId"] = target_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.closeTarget", + "params": params, + } + json = yield cmd_dict + return bool(json["success"])
+ + + +
+[docs] +def expose_dev_tools_protocol( + target_id: TargetID, binding_name: typing.Optional[str] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Inject object to the target's main frame that provides a communication + channel with browser target. + + Injected object will be available as ``window[bindingName]``. + + The object has the following API: + - ``binding.send(json)`` - a method to send messages over the remote debugging protocol + - ``binding.onmessage = json => handleMessage(json)`` - a callback that will be called for the protocol notifications and command responses. + + **EXPERIMENTAL** + + :param target_id: + :param binding_name: *(Optional)* Binding name, 'cdp' if not specified. + """ + params: T_JSON_DICT = dict() + params["targetId"] = target_id.to_json() + if binding_name is not None: + params["bindingName"] = binding_name + cmd_dict: T_JSON_DICT = { + "method": "Target.exposeDevToolsProtocol", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def create_browser_context( + dispose_on_detach: typing.Optional[bool] = None, + proxy_server: typing.Optional[str] = None, + proxy_bypass_list: typing.Optional[str] = None, + origins_with_universal_network_access: typing.Optional[typing.List[str]] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, browser.BrowserContextID]: + """ + Creates a new empty BrowserContext. Similar to an incognito profile but you can have more than + one. + + :param dispose_on_detach: **(EXPERIMENTAL)** *(Optional)* If specified, disposes this context when debugging session disconnects. + :param proxy_server: **(EXPERIMENTAL)** *(Optional)* Proxy server, similar to the one passed to --proxy-server + :param proxy_bypass_list: **(EXPERIMENTAL)** *(Optional)* Proxy bypass list, similar to the one passed to --proxy-bypass-list + :param origins_with_universal_network_access: **(EXPERIMENTAL)** *(Optional)* An optional list of origins to grant unlimited cross-origin access to. Parts of the URL other than those constituting origin are ignored. + :returns: The id of the context created. + """ + params: T_JSON_DICT = dict() + if dispose_on_detach is not None: + params["disposeOnDetach"] = dispose_on_detach + if proxy_server is not None: + params["proxyServer"] = proxy_server + if proxy_bypass_list is not None: + params["proxyBypassList"] = proxy_bypass_list + if origins_with_universal_network_access is not None: + params["originsWithUniversalNetworkAccess"] = [ + i for i in origins_with_universal_network_access + ] + cmd_dict: T_JSON_DICT = { + "method": "Target.createBrowserContext", + "params": params, + } + json = yield cmd_dict + return browser.BrowserContextID.from_json(json["browserContextId"])
+ + + +
+[docs] +def get_browser_contexts() -> ( + typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[browser.BrowserContextID]] +): + """ + Returns all browser contexts created with ``Target.createBrowserContext`` method. + + :returns: An array of browser context ids. + """ + cmd_dict: T_JSON_DICT = { + "method": "Target.getBrowserContexts", + } + json = yield cmd_dict + return [browser.BrowserContextID.from_json(i) for i in json["browserContextIds"]]
+ + + +
+[docs] +def create_target( + url: str, + width: typing.Optional[int] = None, + height: typing.Optional[int] = None, + browser_context_id: typing.Optional[browser.BrowserContextID] = None, + enable_begin_frame_control: typing.Optional[bool] = None, + new_window: typing.Optional[bool] = None, + background: typing.Optional[bool] = None, + for_tab: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, TargetID]: + """ + Creates a new page. + + :param url: The initial URL the page will be navigated to. An empty string indicates about:blank. + :param width: *(Optional)* Frame width in DIP (headless chrome only). + :param height: *(Optional)* Frame height in DIP (headless chrome only). + :param browser_context_id: **(EXPERIMENTAL)** *(Optional)* The browser context to create the page in. + :param enable_begin_frame_control: **(EXPERIMENTAL)** *(Optional)* Whether BeginFrames for this target will be controlled via DevTools (headless chrome only, not supported on MacOS yet, false by default). + :param new_window: *(Optional)* Whether to create a new Window or Tab (chrome-only, false by default). + :param background: *(Optional)* Whether to create the target in background or foreground (chrome-only, false by default). + :param for_tab: **(EXPERIMENTAL)** *(Optional)* Whether to create the target of type "tab". + :returns: The id of the page opened. + """ + params: T_JSON_DICT = dict() + params["url"] = url + if width is not None: + params["width"] = width + if height is not None: + params["height"] = height + if browser_context_id is not None: + params["browserContextId"] = browser_context_id.to_json() + if enable_begin_frame_control is not None: + params["enableBeginFrameControl"] = enable_begin_frame_control + if new_window is not None: + params["newWindow"] = new_window + if background is not None: + params["background"] = background + if for_tab is not None: + params["forTab"] = for_tab + cmd_dict: T_JSON_DICT = { + "method": "Target.createTarget", + "params": params, + } + json = yield cmd_dict + return TargetID.from_json(json["targetId"])
+ + + +
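``create_browser_context``, ``create_target`` and ``attach_to_target`` above are typically chained together. A sketch of that chain; ``connection`` stands for any object whose ``send()`` drives these generators (a nodriver Tab or the browser connection), which is an assumption rather than a documented requirement:

    from nodriver import cdp

    async def open_isolated_page(connection, url: str):
        # A disposable context behaves like a throwaway incognito profile.
        ctx_id = await connection.send(cdp.target.create_browser_context(dispose_on_detach=True))
        target_id = await connection.send(
            cdp.target.create_target(url, browser_context_id=ctx_id)
        )
        # Flat mode is the recommended session addressing scheme (see crbug.com/991325).
        session_id = await connection.send(cdp.target.attach_to_target(target_id, flatten=True))
        return target_id, session_id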
+[docs] +def detach_from_target( + session_id: typing.Optional[SessionID] = None, + target_id: typing.Optional[TargetID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Detaches session with given id. + + :param session_id: *(Optional)* Session to detach. + :param target_id: **(DEPRECATED)** *(Optional)* Deprecated. + """ + params: T_JSON_DICT = dict() + if session_id is not None: + params["sessionId"] = session_id.to_json() + if target_id is not None: + params["targetId"] = target_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.detachFromTarget", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def dispose_browser_context( + browser_context_id: browser.BrowserContextID, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Deletes a BrowserContext. All the belonging pages will be closed without calling their + beforeunload hooks. + + :param browser_context_id: + """ + params: T_JSON_DICT = dict() + params["browserContextId"] = browser_context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.disposeBrowserContext", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def get_target_info( + target_id: typing.Optional[TargetID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, TargetInfo]: + """ + Returns information about a target. + + **EXPERIMENTAL** + + :param target_id: *(Optional)* + :returns: + """ + params: T_JSON_DICT = dict() + if target_id is not None: + params["targetId"] = target_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.getTargetInfo", + "params": params, + } + json = yield cmd_dict + return TargetInfo.from_json(json["targetInfo"])
+ + + +
+[docs] +def get_targets( + filter_: typing.Optional[TargetFilter] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[TargetInfo]]: + """ + Retrieves a list of available targets. + + :param filter_: **(EXPERIMENTAL)** *(Optional)* Only targets matching filter will be reported. If filter is not specified and target discovery is currently enabled, a filter used for target discovery is used for consistency. + :returns: The list of targets. + """ + params: T_JSON_DICT = dict() + if filter_ is not None: + params["filter"] = filter_.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.getTargets", + "params": params, + } + json = yield cmd_dict + return [TargetInfo.from_json(i) for i in json["targetInfos"]]
+ + + +
+[docs] +@deprecated(version="1.3") +def send_message_to_target( + message: str, + session_id: typing.Optional[SessionID] = None, + target_id: typing.Optional[TargetID] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sends protocol message over session with given id. + Consider using flat mode instead; see commands attachToTarget, setAutoAttach, + and crbug.com/991325. + + .. deprecated:: 1.3 + + :param message: + :param session_id: *(Optional)* Identifier of the session. + :param target_id: **(DEPRECATED)** *(Optional)* Deprecated. + """ + params: T_JSON_DICT = dict() + params["message"] = message + if session_id is not None: + params["sessionId"] = session_id.to_json() + if target_id is not None: + params["targetId"] = target_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.sendMessageToTarget", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_auto_attach( + auto_attach: bool, + wait_for_debugger_on_start: bool, + flatten: typing.Optional[bool] = None, + filter_: typing.Optional[TargetFilter] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Controls whether to automatically attach to new targets which are considered to be related to + this one. When turned on, attaches to all existing related targets as well. When turned off, + automatically detaches from all currently attached targets. + This also clears all targets added by ``autoAttachRelated`` from the list of targets to watch + for creation of related targets. + + :param auto_attach: Whether to auto-attach to related targets. + :param wait_for_debugger_on_start: Whether to pause new targets when attaching to them. Use ``Runtime.runIfWaitingForDebugger`` to run paused targets. + :param flatten: **(EXPERIMENTAL)** *(Optional)* Enables "flat" access to the session via specifying sessionId attribute in the commands. We plan to make this the default, deprecate non-flattened mode, and eventually retire it. See crbug.com/991325. + :param filter_: **(EXPERIMENTAL)** *(Optional)* Only targets matching filter will be attached. + """ + params: T_JSON_DICT = dict() + params["autoAttach"] = auto_attach + params["waitForDebuggerOnStart"] = wait_for_debugger_on_start + if flatten is not None: + params["flatten"] = flatten + if filter_ is not None: + params["filter"] = filter_.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.setAutoAttach", + "params": params, + } + json = yield cmd_dict
+ + + + + + + +
+[docs] +def set_discover_targets( + discover: bool, filter_: typing.Optional[TargetFilter] = None +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Controls whether to discover available targets and notify via + ``targetCreated/targetInfoChanged/targetDestroyed`` events. + + :param discover: Whether to discover available targets. + :param filter_: **(EXPERIMENTAL)** *(Optional)* Only targets matching filter will be attached. If ``discover`` is false, ``filter`` must be omitted or empty. + """ + params: T_JSON_DICT = dict() + params["discover"] = discover + if filter_ is not None: + params["filter"] = filter_.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Target.setDiscoverTargets", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_remote_locations( + locations: typing.List[RemoteLocation], +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables target discovery for the specified locations, when ``setDiscoverTargets`` was set to + ``true``. + + **EXPERIMENTAL** + + :param locations: List of remote locations. + """ + params: T_JSON_DICT = dict() + params["locations"] = [i.to_json() for i in locations] + cmd_dict: T_JSON_DICT = { + "method": "Target.setRemoteLocations", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Target.attachedToTarget") +@dataclass +class AttachedToTarget: + """ + **EXPERIMENTAL** + + Issued when attached to target because of auto-attach or ``attachToTarget`` command. + """ + + #: Identifier assigned to the session used to send/receive messages. + session_id: SessionID + target_info: TargetInfo + waiting_for_debugger: bool + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AttachedToTarget: + return cls( + session_id=SessionID.from_json(json["sessionId"]), + target_info=TargetInfo.from_json(json["targetInfo"]), + waiting_for_debugger=bool(json["waitingForDebugger"]), + )
+ + + +
+[docs] +@event_class("Target.detachedFromTarget") +@dataclass +class DetachedFromTarget: + """ + **EXPERIMENTAL** + + Issued when detached from target for any reason (including ``detachFromTarget`` command). Can be + issued multiple times per target if multiple sessions have been attached to it. + """ + + #: Detached session identifier. + session_id: SessionID + #: Deprecated. + target_id: typing.Optional[TargetID] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DetachedFromTarget: + return cls( + session_id=SessionID.from_json(json["sessionId"]), + target_id=( + TargetID.from_json(json["targetId"]) + if json.get("targetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Target.receivedMessageFromTarget") +@dataclass +class ReceivedMessageFromTarget: + """ + Notifies about a new protocol message received from the session (as reported in + ``attachedToTarget`` event). + """ + + #: Identifier of a session which sends a message. + session_id: SessionID + message: str + #: Deprecated. + target_id: typing.Optional[TargetID] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ReceivedMessageFromTarget: + return cls( + session_id=SessionID.from_json(json["sessionId"]), + message=str(json["message"]), + target_id=( + TargetID.from_json(json["targetId"]) + if json.get("targetId", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("Target.targetCreated") +@dataclass +class TargetCreated: + """ + Issued when a possible inspection target is created. + """ + + target_info: TargetInfo + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TargetCreated: + return cls(target_info=TargetInfo.from_json(json["targetInfo"]))
+ + + +
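``set_discover_targets`` earlier in this module is what triggers ``TargetCreated`` (and the related lifecycle events below). A sketch with the usual nodriver Tab assumptions:

    from nodriver import cdp

    async def log_new_targets(tab):
        def on_created(ev: cdp.target.TargetCreated):
            info = ev.target_info
            print(f"new target {info.target_id}: type={info.type_} url={info.url}")

        tab.add_handler(cdp.target.TargetCreated, on_created)
        await tab.send(cdp.target.set_discover_targets(discover=True))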
+[docs] +@event_class("Target.targetDestroyed") +@dataclass +class TargetDestroyed: + """ + Issued when a target is destroyed. + """ + + target_id: TargetID + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TargetDestroyed: + return cls(target_id=TargetID.from_json(json["targetId"]))
+ + + +
+[docs] +@event_class("Target.targetCrashed") +@dataclass +class TargetCrashed: + """ + Issued when a target has crashed. + """ + + target_id: TargetID + #: Termination status type. + status: str + #: Termination error code. + error_code: int + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TargetCrashed: + return cls( + target_id=TargetID.from_json(json["targetId"]), + status=str(json["status"]), + error_code=int(json["errorCode"]), + )
+ + + +
+[docs] +@event_class("Target.targetInfoChanged") +@dataclass +class TargetInfoChanged: + """ + Issued when some information about a target has changed. This only happens between + ``targetCreated`` and ``targetDestroyed``. + """ + + target_info: TargetInfo + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TargetInfoChanged: + return cls(target_info=TargetInfo.from_json(json["targetInfo"]))
+ +
+
+
+
+ + +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/tethering.html b/docs/_build/html/_modules/nodriver/cdp/tethering.html
new file mode 100644
index 0000000..1304e85
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/tethering.html
@@ -0,0 +1,370 @@
+ nodriver.cdp.tethering - nodriver documentation

Source code for nodriver.cdp.tethering

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Tethering (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +def bind(port: int) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Request browser port binding. + + :param port: Port number to bind. + """ + params: T_JSON_DICT = dict() + params["port"] = port + cmd_dict: T_JSON_DICT = { + "method": "Tethering.bind", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def unbind(port: int) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Request browser port unbinding. + + :param port: Port number to unbind. + """ + params: T_JSON_DICT = dict() + params["port"] = port + cmd_dict: T_JSON_DICT = { + "method": "Tethering.unbind", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("Tethering.accepted") +@dataclass +class Accepted: + """ + Informs that port was successfully bound and got a specified connection id. + """ + + #: Port number that was successfully bound. + port: int + #: Connection id to be used. + connection_id: str + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Accepted: + return cls(port=int(json["port"]), connection_id=str(json["connectionId"]))
+ +
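A small sketch tying ``bind`` to the ``Accepted`` event above; the port is arbitrary, the Tab assumptions are the same as in the other examples, and since Tethering is an experimental domain it may simply be unavailable in a given build:

    from nodriver import cdp

    async def bind_tethering_port(tab, port: int = 9333):
        def on_accepted(ev: cdp.tethering.Accepted):
            print(f"port {ev.port} bound, connection id {ev.connection_id}")

        tab.add_handler(cdp.tethering.Accepted, on_accepted)
        await tab.send(cdp.tethering.bind(port))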
+
+
+
+ + +
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/cdp/tracing.html b/docs/_build/html/_modules/nodriver/cdp/tracing.html
new file mode 100644
index 0000000..ca1240f
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/cdp/tracing.html
@@ -0,0 +1,765 @@
+ nodriver.cdp.tracing - nodriver documentation

Source code for nodriver.cdp.tracing

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: Tracing
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+from . import io
+
+
+
+[docs] +class MemoryDumpConfig(dict): + """ + Configuration for memory dump. Used only when "memory-infra" category is enabled. + """ + + def to_json(self) -> dict: + return self + + @classmethod + def from_json(cls, json: dict) -> MemoryDumpConfig: + return cls(json) + + def __repr__(self): + return "MemoryDumpConfig({})".format(super().__repr__())
+ + + +
+[docs] +@dataclass +class TraceConfig: + #: Controls how the trace buffer stores data. + record_mode: typing.Optional[str] = None + + #: Size of the trace buffer in kilobytes. If not specified or zero is passed, a default value + #: of 200 MB would be used. + trace_buffer_size_in_kb: typing.Optional[float] = None + + #: Turns on JavaScript stack sampling. + enable_sampling: typing.Optional[bool] = None + + #: Turns on system tracing. + enable_systrace: typing.Optional[bool] = None + + #: Turns on argument filter. + enable_argument_filter: typing.Optional[bool] = None + + #: Included category filters. + included_categories: typing.Optional[typing.List[str]] = None + + #: Excluded category filters. + excluded_categories: typing.Optional[typing.List[str]] = None + + #: Configuration to synthesize the delays in tracing. + synthetic_delays: typing.Optional[typing.List[str]] = None + + #: Configuration for memory dump triggers. Used only when "memory-infra" category is enabled. + memory_dump_config: typing.Optional[MemoryDumpConfig] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + if self.record_mode is not None: + json["recordMode"] = self.record_mode + if self.trace_buffer_size_in_kb is not None: + json["traceBufferSizeInKb"] = self.trace_buffer_size_in_kb + if self.enable_sampling is not None: + json["enableSampling"] = self.enable_sampling + if self.enable_systrace is not None: + json["enableSystrace"] = self.enable_systrace + if self.enable_argument_filter is not None: + json["enableArgumentFilter"] = self.enable_argument_filter + if self.included_categories is not None: + json["includedCategories"] = [i for i in self.included_categories] + if self.excluded_categories is not None: + json["excludedCategories"] = [i for i in self.excluded_categories] + if self.synthetic_delays is not None: + json["syntheticDelays"] = [i for i in self.synthetic_delays] + if self.memory_dump_config is not None: + json["memoryDumpConfig"] = self.memory_dump_config.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> TraceConfig: + return cls( + record_mode=( + str(json["recordMode"]) + if json.get("recordMode", None) is not None + else None + ), + trace_buffer_size_in_kb=( + float(json["traceBufferSizeInKb"]) + if json.get("traceBufferSizeInKb", None) is not None + else None + ), + enable_sampling=( + bool(json["enableSampling"]) + if json.get("enableSampling", None) is not None + else None + ), + enable_systrace=( + bool(json["enableSystrace"]) + if json.get("enableSystrace", None) is not None + else None + ), + enable_argument_filter=( + bool(json["enableArgumentFilter"]) + if json.get("enableArgumentFilter", None) is not None + else None + ), + included_categories=( + [str(i) for i in json["includedCategories"]] + if json.get("includedCategories", None) is not None + else None + ), + excluded_categories=( + [str(i) for i in json["excludedCategories"]] + if json.get("excludedCategories", None) is not None + else None + ), + synthetic_delays=( + [str(i) for i in json["syntheticDelays"]] + if json.get("syntheticDelays", None) is not None + else None + ), + memory_dump_config=( + MemoryDumpConfig.from_json(json["memoryDumpConfig"]) + if json.get("memoryDumpConfig", None) is not None + else None + ), + )
+ + + +
+[docs] +class StreamFormat(enum.Enum): + """ + Data format of a trace. Can be either the legacy JSON format or the + protocol buffer format. Note that the JSON format will be deprecated soon. + """ + + JSON = "json" + PROTO = "proto" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> StreamFormat: + return cls(json)
+ + + +
+[docs] +class StreamCompression(enum.Enum): + """ + Compression type to use for traces returned via streams. + """ + + NONE = "none" + GZIP = "gzip" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> StreamCompression: + return cls(json)
+ + + +
+[docs] +class MemoryDumpLevelOfDetail(enum.Enum): + """ + Details exposed when memory request explicitly declared. + Keep consistent with memory_dump_request_args.h and + memory_instrumentation.mojom + """ + + BACKGROUND = "background" + LIGHT = "light" + DETAILED = "detailed" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> MemoryDumpLevelOfDetail: + return cls(json)
+ + + +
+[docs] +class TracingBackend(enum.Enum): + """ + Backend type to use for tracing. ``chrome`` uses the Chrome-integrated + tracing service and is supported on all platforms. ``system`` is only + supported on Chrome OS and uses the Perfetto system tracing service. + ``auto`` chooses ``system`` when the perfettoConfig provided to Tracing.start + specifies at least one non-Chrome data source; otherwise uses ``chrome``. + """ + + AUTO = "auto" + CHROME = "chrome" + SYSTEM = "system" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> TracingBackend: + return cls(json)
+ + + +
+[docs] +def end() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Stop trace events collection. + """ + cmd_dict: T_JSON_DICT = { + "method": "Tracing.end", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_categories() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[str]]: + """ + Gets supported tracing categories. + + **EXPERIMENTAL** + + :returns: A list of supported tracing categories. + """ + cmd_dict: T_JSON_DICT = { + "method": "Tracing.getCategories", + } + json = yield cmd_dict + return [str(i) for i in json["categories"]]
+ + + +
+[docs] +def record_clock_sync_marker( + sync_id: str, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Record a clock sync marker in the trace. + + **EXPERIMENTAL** + + :param sync_id: The ID of this clock sync marker + """ + params: T_JSON_DICT = dict() + params["syncId"] = sync_id + cmd_dict: T_JSON_DICT = { + "method": "Tracing.recordClockSyncMarker", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def request_memory_dump( + deterministic: typing.Optional[bool] = None, + level_of_detail: typing.Optional[MemoryDumpLevelOfDetail] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[str, bool]]: + """ + Request a global memory dump. + + **EXPERIMENTAL** + + :param deterministic: *(Optional)* Enables more deterministic results by forcing garbage collection + :param level_of_detail: *(Optional)* Specifies level of details in memory dump. Defaults to "detailed". + :returns: A tuple with the following items: + + 0. **dumpGuid** - GUID of the resulting global memory dump. + 1. **success** - True iff the global memory dump succeeded. + """ + params: T_JSON_DICT = dict() + if deterministic is not None: + params["deterministic"] = deterministic + if level_of_detail is not None: + params["levelOfDetail"] = level_of_detail.to_json() + cmd_dict: T_JSON_DICT = { + "method": "Tracing.requestMemoryDump", + "params": params, + } + json = yield cmd_dict + return (str(json["dumpGuid"]), bool(json["success"]))
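As a usage sketch, assuming a nodriver Tab whose send() drives these generator-style commands (as elsewhere in this codebase); the helper name is hypothetical:

from nodriver import cdp

async def take_memory_dump(tab):
    # returns the (dumpGuid, success) tuple documented above
    dump_guid, ok = await tab.send(
        cdp.tracing.request_memory_dump(
            deterministic=True,
            level_of_detail=cdp.tracing.MemoryDumpLevelOfDetail.LIGHT,
        )
    )
    if not ok:
        raise RuntimeError(f"global memory dump {dump_guid} did not succeed")
    return dump_guid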
+ + + +
+[docs]
+def start(
+    categories: typing.Optional[str] = None,
+    options: typing.Optional[str] = None,
+    buffer_usage_reporting_interval: typing.Optional[float] = None,
+    transfer_mode: typing.Optional[str] = None,
+    stream_format: typing.Optional[StreamFormat] = None,
+    stream_compression: typing.Optional[StreamCompression] = None,
+    trace_config: typing.Optional[TraceConfig] = None,
+    perfetto_config: typing.Optional[str] = None,
+    tracing_backend: typing.Optional[TracingBackend] = None,
+) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]:
+    """
+    Start trace events collection.
+
+    :param categories: **(DEPRECATED)** **(EXPERIMENTAL)** *(Optional)* Category/tag filter
+    :param options: **(DEPRECATED)** **(EXPERIMENTAL)** *(Optional)* Tracing options
+    :param buffer_usage_reporting_interval: **(EXPERIMENTAL)** *(Optional)* If set, the agent will issue bufferUsage events at this interval, specified in milliseconds
+    :param transfer_mode: *(Optional)* Whether to report trace events as a series of dataCollected events or to save the trace to a stream (defaults to ``ReportEvents``).
+    :param stream_format: *(Optional)* Trace data format to use. This only applies when using ``ReturnAsStream`` transfer mode (defaults to ``json``).
+    :param stream_compression: **(EXPERIMENTAL)** *(Optional)* Compression format to use. This only applies when using ``ReturnAsStream`` transfer mode (defaults to ``none``).
+    :param trace_config: *(Optional)*
+    :param perfetto_config: **(EXPERIMENTAL)** *(Optional)* Base64-encoded serialized perfetto.protos.TraceConfig protobuf message. When specified, the parameters ``categories``, ``options`` and ``traceConfig`` are ignored. (Encoded as a base64 string when passed over JSON)
+    :param tracing_backend: **(EXPERIMENTAL)** *(Optional)* Backend type (defaults to ``auto``)
+    """
+    params: T_JSON_DICT = dict()
+    if categories is not None:
+        params["categories"] = categories
+    if options is not None:
+        params["options"] = options
+    if buffer_usage_reporting_interval is not None:
+        params["bufferUsageReportingInterval"] = buffer_usage_reporting_interval
+    if transfer_mode is not None:
+        params["transferMode"] = transfer_mode
+    if stream_format is not None:
+        params["streamFormat"] = stream_format.to_json()
+    if stream_compression is not None:
+        params["streamCompression"] = stream_compression.to_json()
+    if trace_config is not None:
+        params["traceConfig"] = trace_config.to_json()
+    if perfetto_config is not None:
+        params["perfettoConfig"] = perfetto_config
+    if tracing_backend is not None:
+        params["tracingBackend"] = tracing_backend.to_json()
+    cmd_dict: T_JSON_DICT = {
+        "method": "Tracing.start",
+        "params": params,
+    }
+    json = yield cmd_dict
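Putting the pieces together, starting a stream-based trace from a nodriver Tab might look like the following sketch; the categories are illustrative and Tab.send is assumed to forward CDP commands as on nodriver's Connection:

from nodriver import cdp
from nodriver.cdp.tracing import StreamCompression, StreamFormat, TraceConfig

async def start_tracing(tab):
    await tab.send(
        cdp.tracing.start(
            trace_config=TraceConfig(
                record_mode="recordUntilFull",
                included_categories=["devtools.timeline", "v8.execute"],
            ),
            transfer_mode="ReturnAsStream",
            stream_format=StreamFormat.JSON,
            stream_compression=StreamCompression.GZIP,
        )
    )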
+ + + +
+[docs] +@event_class("Tracing.bufferUsage") +@dataclass +class BufferUsage: + """ + **EXPERIMENTAL** + + + """ + + #: A number in range [0..1] that indicates the used size of event buffer as a fraction of its + #: total size. + percent_full: typing.Optional[float] + #: An approximate number of events in the trace log. + event_count: typing.Optional[float] + #: A number in range [0..1] that indicates the used size of event buffer as a fraction of its + #: total size. + value: typing.Optional[float] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BufferUsage: + return cls( + percent_full=( + float(json["percentFull"]) + if json.get("percentFull", None) is not None + else None + ), + event_count=( + float(json["eventCount"]) + if json.get("eventCount", None) is not None + else None + ), + value=float(json["value"]) if json.get("value", None) is not None else None, + )
+ + + +
+[docs] +@event_class("Tracing.dataCollected") +@dataclass +class DataCollected: + """ + **EXPERIMENTAL** + + Contains a bucket of collected trace events. When tracing is stopped collected events will be + sent as a sequence of dataCollected events followed by tracingComplete event. + """ + + value: typing.List[dict] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> DataCollected: + return cls(value=[dict(i) for i in json["value"]])
+ + + +
+[docs]
+@event_class("Tracing.tracingComplete")
+@dataclass
+class TracingComplete:
+    """
+    Signals that tracing has stopped and no trace buffers are pending flush; all data has been
+    delivered via dataCollected events.
+    """
+
+    #: Indicates whether some trace data is known to have been lost, e.g. because the trace ring
+    #: buffer wrapped around.
+    data_loss_occurred: bool
+    #: A handle of the stream that holds resulting trace data.
+    stream: typing.Optional[io.StreamHandle]
+    #: Trace data format of returned stream.
+    trace_format: typing.Optional[StreamFormat]
+    #: Compression format of returned stream.
+    stream_compression: typing.Optional[StreamCompression]
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> TracingComplete:
+        return cls(
+            data_loss_occurred=bool(json["dataLossOccurred"]),
+            stream=(
+                io.StreamHandle.from_json(json["stream"])
+                if json.get("stream", None) is not None
+                else None
+            ),
+            trace_format=(
+                StreamFormat.from_json(json["traceFormat"])
+                if json.get("traceFormat", None) is not None
+                else None
+            ),
+            stream_compression=(
+                StreamCompression.from_json(json["streamCompression"])
+                if json.get("streamCompression", None) is not None
+                else None
+            ),
+        )
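A sketch of the corresponding stop-and-collect step: register a handler for TracingComplete, send end(), then drain the returned stream through the io domain. add_handler/send are assumed to behave as on nodriver's Connection, and the io helpers are assumed to follow the same generated pattern as this module:

import asyncio
from nodriver import cdp

async def stop_and_collect(tab) -> str:
    done = asyncio.Event()
    captured = {}

    def on_complete(ev: cdp.tracing.TracingComplete):
        captured["stream"] = ev.stream          # only set in ReturnAsStream mode
        captured["data_loss"] = ev.data_loss_occurred
        done.set()

    tab.add_handler(cdp.tracing.TracingComplete, on_complete)
    await tab.send(cdp.tracing.end())
    await done.wait()

    parts = []
    while True:
        base64_encoded, data, eof = await tab.send(cdp.io.read(captured["stream"]))
        # chunks may be base64-encoded (e.g. when compression is enabled); decode as needed
        parts.append(data)
        if eof:
            break
    await tab.send(cdp.io.close(captured["stream"]))
    return "".join(parts)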
+ +
+
+
+
+ + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/nodriver/cdp/web_audio.html b/docs/_build/html/_modules/nodriver/cdp/web_audio.html new file mode 100644 index 0000000..0fe5069 --- /dev/null +++ b/docs/_build/html/_modules/nodriver/cdp/web_audio.html @@ -0,0 +1,1038 @@ + + + + + + + + nodriver.cdp.web_audio - nodriver documentation + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Source code for nodriver.cdp.web_audio

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: WebAudio (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs]
+class GraphObjectId(str):
+    """
+    A unique ID for a graph object (AudioContext, AudioNode, AudioParam) in the Web Audio API
+    """
+
+    def to_json(self) -> str:
+        return self
+
+    @classmethod
+    def from_json(cls, json: str) -> GraphObjectId:
+        return cls(json)
+
+    def __repr__(self):
+        return "GraphObjectId({})".format(super().__repr__())
+ + + +
+[docs] +class ContextType(enum.Enum): + """ + Enum of BaseAudioContext types + """ + + REALTIME = "realtime" + OFFLINE = "offline" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ContextType: + return cls(json)
+ + + +
+[docs] +class ContextState(enum.Enum): + """ + Enum of AudioContextState from the spec + """ + + SUSPENDED = "suspended" + RUNNING = "running" + CLOSED = "closed" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ContextState: + return cls(json)
+ + + +
+[docs] +class NodeType(str): + """ + Enum of AudioNode types + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> NodeType: + return cls(json) + + def __repr__(self): + return "NodeType({})".format(super().__repr__())
+ + + +
+[docs] +class ChannelCountMode(enum.Enum): + """ + Enum of AudioNode::ChannelCountMode from the spec + """ + + CLAMPED_MAX = "clamped-max" + EXPLICIT = "explicit" + MAX_ = "max" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ChannelCountMode: + return cls(json)
+ + + +
+[docs] +class ChannelInterpretation(enum.Enum): + """ + Enum of AudioNode::ChannelInterpretation from the spec + """ + + DISCRETE = "discrete" + SPEAKERS = "speakers" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> ChannelInterpretation: + return cls(json)
+ + + +
+[docs] +class ParamType(str): + """ + Enum of AudioParam types + """ + + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> ParamType: + return cls(json) + + def __repr__(self): + return "ParamType({})".format(super().__repr__())
+ + + +
+[docs] +class AutomationRate(enum.Enum): + """ + Enum of AudioParam::AutomationRate from the spec + """ + + A_RATE = "a-rate" + K_RATE = "k-rate" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AutomationRate: + return cls(json)
+ + + +
+[docs] +@dataclass +class ContextRealtimeData: + """ + Fields in AudioContext that change in real-time. + """ + + #: The current context time in second in BaseAudioContext. + current_time: float + + #: The time spent on rendering graph divided by render quantum duration, + #: and multiplied by 100. 100 means the audio renderer reached the full + #: capacity and glitch may occur. + render_capacity: float + + #: A running mean of callback interval. + callback_interval_mean: float + + #: A running variance of callback interval. + callback_interval_variance: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["currentTime"] = self.current_time + json["renderCapacity"] = self.render_capacity + json["callbackIntervalMean"] = self.callback_interval_mean + json["callbackIntervalVariance"] = self.callback_interval_variance + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ContextRealtimeData: + return cls( + current_time=float(json["currentTime"]), + render_capacity=float(json["renderCapacity"]), + callback_interval_mean=float(json["callbackIntervalMean"]), + callback_interval_variance=float(json["callbackIntervalVariance"]), + )
+ + + +
+[docs] +@dataclass +class BaseAudioContext: + """ + Protocol object for BaseAudioContext + """ + + context_id: GraphObjectId + + context_type: ContextType + + context_state: ContextState + + #: Platform-dependent callback buffer size. + callback_buffer_size: float + + #: Number of output channels supported by audio hardware in use. + max_output_channel_count: float + + #: Context sample rate. + sample_rate: float + + realtime_data: typing.Optional[ContextRealtimeData] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["contextId"] = self.context_id.to_json() + json["contextType"] = self.context_type.to_json() + json["contextState"] = self.context_state.to_json() + json["callbackBufferSize"] = self.callback_buffer_size + json["maxOutputChannelCount"] = self.max_output_channel_count + json["sampleRate"] = self.sample_rate + if self.realtime_data is not None: + json["realtimeData"] = self.realtime_data.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> BaseAudioContext: + return cls( + context_id=GraphObjectId.from_json(json["contextId"]), + context_type=ContextType.from_json(json["contextType"]), + context_state=ContextState.from_json(json["contextState"]), + callback_buffer_size=float(json["callbackBufferSize"]), + max_output_channel_count=float(json["maxOutputChannelCount"]), + sample_rate=float(json["sampleRate"]), + realtime_data=( + ContextRealtimeData.from_json(json["realtimeData"]) + if json.get("realtimeData", None) is not None + else None + ), + )
+ + + +
+[docs] +@dataclass +class AudioListener: + """ + Protocol object for AudioListener + """ + + listener_id: GraphObjectId + + context_id: GraphObjectId + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["listenerId"] = self.listener_id.to_json() + json["contextId"] = self.context_id.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AudioListener: + return cls( + listener_id=GraphObjectId.from_json(json["listenerId"]), + context_id=GraphObjectId.from_json(json["contextId"]), + )
+ + + +
+[docs] +@dataclass +class AudioNode: + """ + Protocol object for AudioNode + """ + + node_id: GraphObjectId + + context_id: GraphObjectId + + node_type: NodeType + + number_of_inputs: float + + number_of_outputs: float + + channel_count: float + + channel_count_mode: ChannelCountMode + + channel_interpretation: ChannelInterpretation + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["nodeId"] = self.node_id.to_json() + json["contextId"] = self.context_id.to_json() + json["nodeType"] = self.node_type.to_json() + json["numberOfInputs"] = self.number_of_inputs + json["numberOfOutputs"] = self.number_of_outputs + json["channelCount"] = self.channel_count + json["channelCountMode"] = self.channel_count_mode.to_json() + json["channelInterpretation"] = self.channel_interpretation.to_json() + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AudioNode: + return cls( + node_id=GraphObjectId.from_json(json["nodeId"]), + context_id=GraphObjectId.from_json(json["contextId"]), + node_type=NodeType.from_json(json["nodeType"]), + number_of_inputs=float(json["numberOfInputs"]), + number_of_outputs=float(json["numberOfOutputs"]), + channel_count=float(json["channelCount"]), + channel_count_mode=ChannelCountMode.from_json(json["channelCountMode"]), + channel_interpretation=ChannelInterpretation.from_json( + json["channelInterpretation"] + ), + )
+ + + +
+[docs] +@dataclass +class AudioParam: + """ + Protocol object for AudioParam + """ + + param_id: GraphObjectId + + node_id: GraphObjectId + + context_id: GraphObjectId + + param_type: ParamType + + rate: AutomationRate + + default_value: float + + min_value: float + + max_value: float + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["paramId"] = self.param_id.to_json() + json["nodeId"] = self.node_id.to_json() + json["contextId"] = self.context_id.to_json() + json["paramType"] = self.param_type.to_json() + json["rate"] = self.rate.to_json() + json["defaultValue"] = self.default_value + json["minValue"] = self.min_value + json["maxValue"] = self.max_value + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AudioParam: + return cls( + param_id=GraphObjectId.from_json(json["paramId"]), + node_id=GraphObjectId.from_json(json["nodeId"]), + context_id=GraphObjectId.from_json(json["contextId"]), + param_type=ParamType.from_json(json["paramType"]), + rate=AutomationRate.from_json(json["rate"]), + default_value=float(json["defaultValue"]), + min_value=float(json["minValue"]), + max_value=float(json["maxValue"]), + )
+ + + +
+[docs] +def enable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enables the WebAudio domain and starts sending context lifetime events. + """ + cmd_dict: T_JSON_DICT = { + "method": "WebAudio.enable", + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disables the WebAudio domain. + """ + cmd_dict: T_JSON_DICT = { + "method": "WebAudio.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def get_realtime_data( + context_id: GraphObjectId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, ContextRealtimeData]: + """ + Fetch the realtime data from the registered contexts. + + :param context_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["contextId"] = context_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "WebAudio.getRealtimeData", + "params": params, + } + json = yield cmd_dict + return ContextRealtimeData.from_json(json["realtimeData"])
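As a small usage sketch (context_id would typically come from a ContextCreated event, see below; Tab.send assumed as before):

from nodriver import cdp

async def report_render_capacity(tab, context_id):
    await tab.send(cdp.web_audio.enable())
    data = await tab.send(cdp.web_audio.get_realtime_data(context_id))
    # render_capacity of 100 means the renderer is at full budget and may glitch
    print(f"t={data.current_time:.2f}s capacity={data.render_capacity:.1f} "
          f"callback interval mean={data.callback_interval_mean:.3f}")
    await tab.send(cdp.web_audio.disable())
    return data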
+ + + +
+[docs] +@event_class("WebAudio.contextCreated") +@dataclass +class ContextCreated: + """ + Notifies that a new BaseAudioContext has been created. + """ + + context: BaseAudioContext + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ContextCreated: + return cls(context=BaseAudioContext.from_json(json["context"]))
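Capturing the context id as soon as a page constructs an AudioContext could be done with a handler like this (handler registration assumed as above; the callback name is arbitrary):

from nodriver import cdp

async def watch_audio_contexts(tab, on_context):
    def handler(ev: cdp.web_audio.ContextCreated):
        ctx = ev.context
        on_context(ctx.context_id, ctx.context_type, ctx.sample_rate)

    tab.add_handler(cdp.web_audio.ContextCreated, handler)
    await tab.send(cdp.web_audio.enable())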
+ + + +
+[docs] +@event_class("WebAudio.contextWillBeDestroyed") +@dataclass +class ContextWillBeDestroyed: + """ + Notifies that an existing BaseAudioContext will be destroyed. + """ + + context_id: GraphObjectId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> ContextWillBeDestroyed: + return cls(context_id=GraphObjectId.from_json(json["contextId"]))
+ + + +
+[docs]
+@event_class("WebAudio.contextChanged")
+@dataclass
+class ContextChanged:
+    """
+    Notifies that an existing BaseAudioContext has changed some properties (the id stays the same).
+    """
+
+    context: BaseAudioContext
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> ContextChanged:
+        return cls(context=BaseAudioContext.from_json(json["context"]))
+ + + +
+[docs] +@event_class("WebAudio.audioListenerCreated") +@dataclass +class AudioListenerCreated: + """ + Notifies that the construction of an AudioListener has finished. + """ + + listener: AudioListener + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AudioListenerCreated: + return cls(listener=AudioListener.from_json(json["listener"]))
+ + + +
+[docs]
+@event_class("WebAudio.audioListenerWillBeDestroyed")
+@dataclass
+class AudioListenerWillBeDestroyed:
+    """
+    Notifies that an existing AudioListener has been destroyed.
+    """
+
+    context_id: GraphObjectId
+    listener_id: GraphObjectId
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> AudioListenerWillBeDestroyed:
+        return cls(
+            context_id=GraphObjectId.from_json(json["contextId"]),
+            listener_id=GraphObjectId.from_json(json["listenerId"]),
+        )
+ + + +
+[docs] +@event_class("WebAudio.audioNodeCreated") +@dataclass +class AudioNodeCreated: + """ + Notifies that a new AudioNode has been created. + """ + + node: AudioNode + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AudioNodeCreated: + return cls(node=AudioNode.from_json(json["node"]))
+ + + +
+[docs] +@event_class("WebAudio.audioNodeWillBeDestroyed") +@dataclass +class AudioNodeWillBeDestroyed: + """ + Notifies that an existing AudioNode has been destroyed. + """ + + context_id: GraphObjectId + node_id: GraphObjectId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AudioNodeWillBeDestroyed: + return cls( + context_id=GraphObjectId.from_json(json["contextId"]), + node_id=GraphObjectId.from_json(json["nodeId"]), + )
+ + + +
+[docs] +@event_class("WebAudio.audioParamCreated") +@dataclass +class AudioParamCreated: + """ + Notifies that a new AudioParam has been created. + """ + + param: AudioParam + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AudioParamCreated: + return cls(param=AudioParam.from_json(json["param"]))
+ + + +
+[docs] +@event_class("WebAudio.audioParamWillBeDestroyed") +@dataclass +class AudioParamWillBeDestroyed: + """ + Notifies that an existing AudioParam has been destroyed. + """ + + context_id: GraphObjectId + node_id: GraphObjectId + param_id: GraphObjectId + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> AudioParamWillBeDestroyed: + return cls( + context_id=GraphObjectId.from_json(json["contextId"]), + node_id=GraphObjectId.from_json(json["nodeId"]), + param_id=GraphObjectId.from_json(json["paramId"]), + )
+ + + +
+[docs] +@event_class("WebAudio.nodesConnected") +@dataclass +class NodesConnected: + """ + Notifies that two AudioNodes are connected. + """ + + context_id: GraphObjectId + source_id: GraphObjectId + destination_id: GraphObjectId + source_output_index: typing.Optional[float] + destination_input_index: typing.Optional[float] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> NodesConnected: + return cls( + context_id=GraphObjectId.from_json(json["contextId"]), + source_id=GraphObjectId.from_json(json["sourceId"]), + destination_id=GraphObjectId.from_json(json["destinationId"]), + source_output_index=( + float(json["sourceOutputIndex"]) + if json.get("sourceOutputIndex", None) is not None + else None + ), + destination_input_index=( + float(json["destinationInputIndex"]) + if json.get("destinationInputIndex", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("WebAudio.nodesDisconnected") +@dataclass +class NodesDisconnected: + """ + Notifies that AudioNodes are disconnected. The destination can be null, and it means all the outgoing connections from the source are disconnected. + """ + + context_id: GraphObjectId + source_id: GraphObjectId + destination_id: GraphObjectId + source_output_index: typing.Optional[float] + destination_input_index: typing.Optional[float] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> NodesDisconnected: + return cls( + context_id=GraphObjectId.from_json(json["contextId"]), + source_id=GraphObjectId.from_json(json["sourceId"]), + destination_id=GraphObjectId.from_json(json["destinationId"]), + source_output_index=( + float(json["sourceOutputIndex"]) + if json.get("sourceOutputIndex", None) is not None + else None + ), + destination_input_index=( + float(json["destinationInputIndex"]) + if json.get("destinationInputIndex", None) is not None + else None + ), + )
+ + + +
+[docs] +@event_class("WebAudio.nodeParamConnected") +@dataclass +class NodeParamConnected: + """ + Notifies that an AudioNode is connected to an AudioParam. + """ + + context_id: GraphObjectId + source_id: GraphObjectId + destination_id: GraphObjectId + source_output_index: typing.Optional[float] + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> NodeParamConnected: + return cls( + context_id=GraphObjectId.from_json(json["contextId"]), + source_id=GraphObjectId.from_json(json["sourceId"]), + destination_id=GraphObjectId.from_json(json["destinationId"]), + source_output_index=( + float(json["sourceOutputIndex"]) + if json.get("sourceOutputIndex", None) is not None + else None + ), + )
+ + + +
+[docs]
+@event_class("WebAudio.nodeParamDisconnected")
+@dataclass
+class NodeParamDisconnected:
+    """
+    Notifies that an AudioNode is disconnected from an AudioParam.
+    """
+
+    context_id: GraphObjectId
+    source_id: GraphObjectId
+    destination_id: GraphObjectId
+    source_output_index: typing.Optional[float]
+
+    @classmethod
+    def from_json(cls, json: T_JSON_DICT) -> NodeParamDisconnected:
+        return cls(
+            context_id=GraphObjectId.from_json(json["contextId"]),
+            source_id=GraphObjectId.from_json(json["sourceId"]),
+            destination_id=GraphObjectId.from_json(json["destinationId"]),
+            source_output_index=(
+                float(json["sourceOutputIndex"])
+                if json.get("sourceOutputIndex", None) is not None
+                else None
+            ),
+        )
+ +
+
+
+
+ + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/nodriver/cdp/web_authn.html b/docs/_build/html/_modules/nodriver/cdp/web_authn.html new file mode 100644 index 0000000..3263f6c --- /dev/null +++ b/docs/_build/html/_modules/nodriver/cdp/web_authn.html @@ -0,0 +1,950 @@ + + + + + + + + nodriver.cdp.web_authn - nodriver documentation + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Source code for nodriver.cdp.web_authn

+# DO NOT EDIT THIS FILE!
+#
+# This file is generated from the CDP specification. If you need to make
+# changes, edit the generator and regenerate all of the modules.
+#
+# CDP domain: WebAuthn (experimental)
+
+from __future__ import annotations
+import enum
+import typing
+from dataclasses import dataclass
+from .util import event_class, T_JSON_DICT
+
+
+
+[docs] +class AuthenticatorId(str): + def to_json(self) -> str: + return self + + @classmethod + def from_json(cls, json: str) -> AuthenticatorId: + return cls(json) + + def __repr__(self): + return "AuthenticatorId({})".format(super().__repr__())
+ + + +
+[docs] +class AuthenticatorProtocol(enum.Enum): + U2F = "u2f" + CTAP2 = "ctap2" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AuthenticatorProtocol: + return cls(json)
+ + + +
+[docs] +class Ctap2Version(enum.Enum): + CTAP2_0 = "ctap2_0" + CTAP2_1 = "ctap2_1" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> Ctap2Version: + return cls(json)
+ + + +
+[docs] +class AuthenticatorTransport(enum.Enum): + USB = "usb" + NFC = "nfc" + BLE = "ble" + CABLE = "cable" + INTERNAL = "internal" + + def to_json(self) -> str: + return self.value + + @classmethod + def from_json(cls, json: str) -> AuthenticatorTransport: + return cls(json)
+ + + +
+[docs] +@dataclass +class VirtualAuthenticatorOptions: + protocol: AuthenticatorProtocol + + transport: AuthenticatorTransport + + #: Defaults to ctap2_0. Ignored if ``protocol`` == u2f. + ctap2_version: typing.Optional[Ctap2Version] = None + + #: Defaults to false. + has_resident_key: typing.Optional[bool] = None + + #: Defaults to false. + has_user_verification: typing.Optional[bool] = None + + #: If set to true, the authenticator will support the largeBlob extension. + #: https://w3c.github.io/webauthn#largeBlob + #: Defaults to false. + has_large_blob: typing.Optional[bool] = None + + #: If set to true, the authenticator will support the credBlob extension. + #: https://fidoalliance.org/specs/fido-v2.1-rd-20201208/fido-client-to-authenticator-protocol-v2.1-rd-20201208.html#sctn-credBlob-extension + #: Defaults to false. + has_cred_blob: typing.Optional[bool] = None + + #: If set to true, the authenticator will support the minPinLength extension. + #: https://fidoalliance.org/specs/fido-v2.1-ps-20210615/fido-client-to-authenticator-protocol-v2.1-ps-20210615.html#sctn-minpinlength-extension + #: Defaults to false. + has_min_pin_length: typing.Optional[bool] = None + + #: If set to true, the authenticator will support the prf extension. + #: https://w3c.github.io/webauthn/#prf-extension + #: Defaults to false. + has_prf: typing.Optional[bool] = None + + #: If set to true, tests of user presence will succeed immediately. + #: Otherwise, they will not be resolved. Defaults to true. + automatic_presence_simulation: typing.Optional[bool] = None + + #: Sets whether User Verification succeeds or fails for an authenticator. + #: Defaults to false. + is_user_verified: typing.Optional[bool] = None + + #: Credentials created by this authenticator will have the backup + #: eligibility (BE) flag set to this value. Defaults to false. + #: https://w3c.github.io/webauthn/#sctn-credential-backup + default_backup_eligibility: typing.Optional[bool] = None + + #: Credentials created by this authenticator will have the backup state + #: (BS) flag set to this value. Defaults to false. 
+ #: https://w3c.github.io/webauthn/#sctn-credential-backup + default_backup_state: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["protocol"] = self.protocol.to_json() + json["transport"] = self.transport.to_json() + if self.ctap2_version is not None: + json["ctap2Version"] = self.ctap2_version.to_json() + if self.has_resident_key is not None: + json["hasResidentKey"] = self.has_resident_key + if self.has_user_verification is not None: + json["hasUserVerification"] = self.has_user_verification + if self.has_large_blob is not None: + json["hasLargeBlob"] = self.has_large_blob + if self.has_cred_blob is not None: + json["hasCredBlob"] = self.has_cred_blob + if self.has_min_pin_length is not None: + json["hasMinPinLength"] = self.has_min_pin_length + if self.has_prf is not None: + json["hasPrf"] = self.has_prf + if self.automatic_presence_simulation is not None: + json["automaticPresenceSimulation"] = self.automatic_presence_simulation + if self.is_user_verified is not None: + json["isUserVerified"] = self.is_user_verified + if self.default_backup_eligibility is not None: + json["defaultBackupEligibility"] = self.default_backup_eligibility + if self.default_backup_state is not None: + json["defaultBackupState"] = self.default_backup_state + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> VirtualAuthenticatorOptions: + return cls( + protocol=AuthenticatorProtocol.from_json(json["protocol"]), + transport=AuthenticatorTransport.from_json(json["transport"]), + ctap2_version=( + Ctap2Version.from_json(json["ctap2Version"]) + if json.get("ctap2Version", None) is not None + else None + ), + has_resident_key=( + bool(json["hasResidentKey"]) + if json.get("hasResidentKey", None) is not None + else None + ), + has_user_verification=( + bool(json["hasUserVerification"]) + if json.get("hasUserVerification", None) is not None + else None + ), + has_large_blob=( + bool(json["hasLargeBlob"]) + if json.get("hasLargeBlob", None) is not None + else None + ), + has_cred_blob=( + bool(json["hasCredBlob"]) + if json.get("hasCredBlob", None) is not None + else None + ), + has_min_pin_length=( + bool(json["hasMinPinLength"]) + if json.get("hasMinPinLength", None) is not None + else None + ), + has_prf=( + bool(json["hasPrf"]) if json.get("hasPrf", None) is not None else None + ), + automatic_presence_simulation=( + bool(json["automaticPresenceSimulation"]) + if json.get("automaticPresenceSimulation", None) is not None + else None + ), + is_user_verified=( + bool(json["isUserVerified"]) + if json.get("isUserVerified", None) is not None + else None + ), + default_backup_eligibility=( + bool(json["defaultBackupEligibility"]) + if json.get("defaultBackupEligibility", None) is not None + else None + ), + default_backup_state=( + bool(json["defaultBackupState"]) + if json.get("defaultBackupState", None) is not None + else None + ), + )
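A plausible configuration for a resident-key capable CTAP2.1 authenticator; every value below is just an example:

from nodriver.cdp.web_authn import (
    AuthenticatorProtocol,
    AuthenticatorTransport,
    Ctap2Version,
    VirtualAuthenticatorOptions,
)

options = VirtualAuthenticatorOptions(
    protocol=AuthenticatorProtocol.CTAP2,
    transport=AuthenticatorTransport.INTERNAL,
    ctap2_version=Ctap2Version.CTAP2_1,
    has_resident_key=True,
    has_user_verification=True,
    is_user_verified=True,
    automatic_presence_simulation=True,
)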
+ + + +
+[docs] +@dataclass +class Credential: + credential_id: str + + is_resident_credential: bool + + #: The ECDSA P-256 private key in PKCS#8 format. (Encoded as a base64 string when passed over JSON) + private_key: str + + #: Signature counter. This is incremented by one for each successful + #: assertion. + #: See https://w3c.github.io/webauthn/#signature-counter + sign_count: int + + #: Relying Party ID the credential is scoped to. Must be set when adding a + #: credential. + rp_id: typing.Optional[str] = None + + #: An opaque byte sequence with a maximum size of 64 bytes mapping the + #: credential to a specific user. (Encoded as a base64 string when passed over JSON) + user_handle: typing.Optional[str] = None + + #: The large blob associated with the credential. + #: See https://w3c.github.io/webauthn/#sctn-large-blob-extension (Encoded as a base64 string when passed over JSON) + large_blob: typing.Optional[str] = None + + #: Assertions returned by this credential will have the backup eligibility + #: (BE) flag set to this value. Defaults to the authenticator's + #: defaultBackupEligibility value. + backup_eligibility: typing.Optional[bool] = None + + #: Assertions returned by this credential will have the backup state (BS) + #: flag set to this value. Defaults to the authenticator's + #: defaultBackupState value. + backup_state: typing.Optional[bool] = None + + def to_json(self) -> T_JSON_DICT: + json: T_JSON_DICT = dict() + json["credentialId"] = self.credential_id + json["isResidentCredential"] = self.is_resident_credential + json["privateKey"] = self.private_key + json["signCount"] = self.sign_count + if self.rp_id is not None: + json["rpId"] = self.rp_id + if self.user_handle is not None: + json["userHandle"] = self.user_handle + if self.large_blob is not None: + json["largeBlob"] = self.large_blob + if self.backup_eligibility is not None: + json["backupEligibility"] = self.backup_eligibility + if self.backup_state is not None: + json["backupState"] = self.backup_state + return json + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> Credential: + return cls( + credential_id=str(json["credentialId"]), + is_resident_credential=bool(json["isResidentCredential"]), + private_key=str(json["privateKey"]), + sign_count=int(json["signCount"]), + rp_id=str(json["rpId"]) if json.get("rpId", None) is not None else None, + user_handle=( + str(json["userHandle"]) + if json.get("userHandle", None) is not None + else None + ), + large_blob=( + str(json["largeBlob"]) + if json.get("largeBlob", None) is not None + else None + ), + backup_eligibility=( + bool(json["backupEligibility"]) + if json.get("backupEligibility", None) is not None + else None + ), + backup_state=( + bool(json["backupState"]) + if json.get("backupState", None) is not None + else None + ), + )
+ + + +
+[docs] +def enable( + enable_ui: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Enable the WebAuthn domain and start intercepting credential storage and + retrieval with a virtual authenticator. + + :param enable_ui: *(Optional)* Whether to enable the WebAuthn user interface. Enabling the UI is recommended for debugging and demo purposes, as it is closer to the real experience. Disabling the UI is recommended for automated testing. Supported at the embedder's discretion if UI is available. Defaults to false. + """ + params: T_JSON_DICT = dict() + if enable_ui is not None: + params["enableUI"] = enable_ui + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.enable", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def disable() -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Disable the WebAuthn domain. + """ + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.disable", + } + json = yield cmd_dict
+ + + +
+[docs] +def add_virtual_authenticator( + options: VirtualAuthenticatorOptions, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, AuthenticatorId]: + """ + Creates and adds a virtual authenticator. + + :param options: + :returns: + """ + params: T_JSON_DICT = dict() + params["options"] = options.to_json() + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.addVirtualAuthenticator", + "params": params, + } + json = yield cmd_dict + return AuthenticatorId.from_json(json["authenticatorId"])
+ + + +
+[docs] +def set_response_override_bits( + authenticator_id: AuthenticatorId, + is_bogus_signature: typing.Optional[bool] = None, + is_bad_uv: typing.Optional[bool] = None, + is_bad_up: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Resets parameters isBogusSignature, isBadUV, isBadUP to false if they are not present. + + :param authenticator_id: + :param is_bogus_signature: *(Optional)* If isBogusSignature is set, overrides the signature in the authenticator response to be zero. Defaults to false. + :param is_bad_uv: *(Optional)* If isBadUV is set, overrides the UV bit in the flags in the authenticator response to be zero. Defaults to false. + :param is_bad_up: *(Optional)* If isBadUP is set, overrides the UP bit in the flags in the authenticator response to be zero. Defaults to false. + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + if is_bogus_signature is not None: + params["isBogusSignature"] = is_bogus_signature + if is_bad_uv is not None: + params["isBadUV"] = is_bad_uv + if is_bad_up is not None: + params["isBadUP"] = is_bad_up + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.setResponseOverrideBits", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def remove_virtual_authenticator( + authenticator_id: AuthenticatorId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes the given authenticator. + + :param authenticator_id: + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.removeVirtualAuthenticator", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def add_credential( + authenticator_id: AuthenticatorId, credential: Credential +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Adds the credential to the specified authenticator. + + :param authenticator_id: + :param credential: + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + params["credential"] = credential.to_json() + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.addCredential", + "params": params, + } + json = yield cmd_dict
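Seeding that authenticator with a known credential could look like the sketch below; the credential id, user handle, and private key bytes are placeholders, not working values:

import base64
from nodriver import cdp
from nodriver.cdp.web_authn import Credential

async def seed_credential(tab, authenticator_id, pkcs8_private_key: bytes):
    credential = Credential(
        credential_id=base64.b64encode(b"example-credential-id").decode(),
        is_resident_credential=True,
        private_key=base64.b64encode(pkcs8_private_key).decode(),
        sign_count=0,
        rp_id="example.com",
        user_handle=base64.b64encode(b"user-0001").decode(),
    )
    await tab.send(cdp.web_authn.add_credential(authenticator_id, credential))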
+ + + +
+[docs] +def get_credential( + authenticator_id: AuthenticatorId, credential_id: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, Credential]: + """ + Returns a single credential stored in the given virtual authenticator that + matches the credential ID. + + :param authenticator_id: + :param credential_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + params["credentialId"] = credential_id + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.getCredential", + "params": params, + } + json = yield cmd_dict + return Credential.from_json(json["credential"])
+ + + +
+[docs] +def get_credentials( + authenticator_id: AuthenticatorId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.List[Credential]]: + """ + Returns all the credentials stored in the given virtual authenticator. + + :param authenticator_id: + :returns: + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.getCredentials", + "params": params, + } + json = yield cmd_dict + return [Credential.from_json(i) for i in json["credentials"]]
+ + + +
+[docs] +def remove_credential( + authenticator_id: AuthenticatorId, credential_id: str +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Removes a credential from the authenticator. + + :param authenticator_id: + :param credential_id: + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + params["credentialId"] = credential_id + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.removeCredential", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def clear_credentials( + authenticator_id: AuthenticatorId, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Clears all the credentials from the specified device. + + :param authenticator_id: + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.clearCredentials", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_user_verified( + authenticator_id: AuthenticatorId, is_user_verified: bool +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets whether User Verification succeeds or fails for an authenticator. + The default is true. + + :param authenticator_id: + :param is_user_verified: + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + params["isUserVerified"] = is_user_verified + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.setUserVerified", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_automatic_presence_simulation( + authenticator_id: AuthenticatorId, enabled: bool +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Sets whether tests of user presence will succeed immediately (if true) or fail to resolve (if false) for an authenticator. + The default is true. + + :param authenticator_id: + :param enabled: + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + params["enabled"] = enabled + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.setAutomaticPresenceSimulation", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +def set_credential_properties( + authenticator_id: AuthenticatorId, + credential_id: str, + backup_eligibility: typing.Optional[bool] = None, + backup_state: typing.Optional[bool] = None, +) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, None]: + """ + Allows setting credential properties. + https://w3c.github.io/webauthn/#sctn-automation-set-credential-properties + + :param authenticator_id: + :param credential_id: + :param backup_eligibility: *(Optional)* + :param backup_state: *(Optional)* + """ + params: T_JSON_DICT = dict() + params["authenticatorId"] = authenticator_id.to_json() + params["credentialId"] = credential_id + if backup_eligibility is not None: + params["backupEligibility"] = backup_eligibility + if backup_state is not None: + params["backupState"] = backup_state + cmd_dict: T_JSON_DICT = { + "method": "WebAuthn.setCredentialProperties", + "params": params, + } + json = yield cmd_dict
+ + + +
+[docs] +@event_class("WebAuthn.credentialAdded") +@dataclass +class CredentialAdded: + """ + Triggered when a credential is added to an authenticator. + """ + + authenticator_id: AuthenticatorId + credential: Credential + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CredentialAdded: + return cls( + authenticator_id=AuthenticatorId.from_json(json["authenticatorId"]), + credential=Credential.from_json(json["credential"]), + )
+ + + +
+[docs] +@event_class("WebAuthn.credentialAsserted") +@dataclass +class CredentialAsserted: + """ + Triggered when a credential is used in a webauthn assertion. + """ + + authenticator_id: AuthenticatorId + credential: Credential + + @classmethod + def from_json(cls, json: T_JSON_DICT) -> CredentialAsserted: + return cls( + authenticator_id=AuthenticatorId.from_json(json["authenticatorId"]), + credential=Credential.from_json(json["credential"]), + )
+ +
+
+
+
+ + +
+ + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/nodriver/core/_contradict.html b/docs/_build/html/_modules/nodriver/core/_contradict.html new file mode 100644 index 0000000..0e974d0 --- /dev/null +++ b/docs/_build/html/_modules/nodriver/core/_contradict.html @@ -0,0 +1,427 @@ + + + + + + + + nodriver.core._contradict - nodriver documentation + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Source code for nodriver.core._contradict

+import re
+import warnings as _warnings
+from collections.abc import Mapping as _Mapping, Sequence as _Sequence
+import logging
+
+__logger__ = logging.getLogger(__name__)
+
+
+__all__ = ["cdict", "ContraDict"]
+
+
+
+[docs] +def cdict(*args, **kwargs): + """ + factory function + """ + return ContraDict(*args, **kwargs)
+ + + +class ContraDict(dict): + """ + directly inherited from dict + + accessible by attribute. o.x == o['x'] + This works also for all corner cases. + + native json.dumps and json.loads work with it + + names like "keys", "update", "values" etc won't overwrite the methods, + but will just be available using dict lookup notation obj['items'] instead of obj.items + + all key names are converted to snake_case + hyphen's (-), dot's (.) or whitespaces are replaced by underscore (_) + + autocomplete works even if the objects comes from a list + + recursive action. dict assignments will be converted too. + """ + + __module__ = None + + def __init__(self, *args, **kwargs): + super().__init__() + silent = kwargs.pop("silent", False) + _ = dict(*args, **kwargs) + + # for key, val in dict(*args, **kwargs).items(): + # _[key] = val + super().__setattr__("__dict__", self) + for k, v in _.items(): + _check_key(k, self, False, silent) + super().__setitem__(k, _wrap(self.__class__, v)) + + def __setitem__(self, key, value): + super().__setitem__(key, _wrap(self.__class__, value)) + + def __setattr__(self, key, value): + super().__setitem__(key, _wrap(self.__class__, value)) + + def __getattribute__(self, attribute): + if attribute in self: + return self[attribute] + if not _check_key(attribute, self, True, silent=True): + return getattr(super(), attribute) + + return object.__getattribute__(self, attribute) + + +def _wrap(cls, v): + if isinstance(v, _Mapping): + v = cls(v) + + elif isinstance(v, _Sequence) and not isinstance( + v, (str, bytes, bytearray, set, tuple) + ): + v = list([_wrap(cls, x) for x in v]) + return v + + +_warning_names = ( + "items", + "keys", + "values", + "update", + "clear", + "copy", + "fromkeys", + "get", + "items", + "keys", + "pop", + "popitem", + "setdefault", + "update", + "values", + "class", +) + +_warning_names_message = """\n\ + While creating a ContraDict object, a key offending key name '{0}' has been found, which might behave unexpected. + you will only be able to look it up using key, eg. myobject['{0}']. myobject.{0} will not work with that name. + """ + + +def _check_key(key: str, mapping: _Mapping, boolean: bool = False, silent=False): + """checks `key` and warns if needed + + :param key: + :param boolean: return True or False instead of passthrough + :return: + """ + e = None + if not isinstance(key, (str,)): + if boolean: + return True + return key + if key.lower() in _warning_names or any(_ in key for _ in ("-", ".")): + if not silent: + _warnings.warn(_warning_names_message.format(key)) + e = True + if not boolean: + return key + return not e +
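A short illustration of the behaviour described in the docstring; the keys and values are made up:

from nodriver.core._contradict import cdict

info = cdict(
    {"product": "Chrome/126", "websocket": {"debugger_url": "ws://127.0.0.1:9222/devtools"}},
    silent=True,  # suppresses the warning emitted for reserved names such as "items"
)

assert info.product == info["product"]                   # attribute and key access are equivalent
assert info.websocket.debugger_url.startswith("ws://")   # nested mappings are wrapped recursively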
+
+
+
+ + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/nodriver/core/browser.html b/docs/_build/html/_modules/nodriver/core/browser.html new file mode 100644 index 0000000..0af946a --- /dev/null +++ b/docs/_build/html/_modules/nodriver/core/browser.html @@ -0,0 +1,1155 @@ + + + + + + + + nodriver.core.browser - nodriver documentation + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Source code for nodriver.core.browser

+from __future__ import annotations
+
+import asyncio
+import atexit
+import json
+import logging
+import os
+import pickle
+import pathlib
+import typing
+import urllib.parse
+import urllib.request
+import warnings
+from collections import defaultdict
+from typing import List, Union, Tuple
+
+from .. import cdp
+from . import util
+from . import tab
+from ._contradict import ContraDict
+from .config import PathLike, Config, is_posix
+from .connection import Connection
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs] +class Browser: + """ + The Browser object is the "root" of the hierarchy and contains a reference + to the browser parent process. + there should usually be only 1 instance of this. + + All opened tabs, extra browser screens and resources will not cause a new Browser process, + but rather create additional :class:`nodriver.Tab` objects. + + So, besides starting your instance and first/additional tabs, you don't actively use it a lot under normal conditions. + + Tab objects will represent and control + - tabs (as you know them) + - browser windows (new window) + - iframe + - background processes + + note: + the Browser object is not instantiated by __init__ but using the asynchronous :meth:`nodriver.Browser.create` method. + + note: + in Chromium based browsers, there is a parent process which keeps running all the time, even if + there are no visible browser windows. sometimes it's stubborn to close it, so make sure after using + this library, the browser is correctly and fully closed/exited/killed. + + """ + + _process: asyncio.subprocess.Process + _process_pid: int + _http: HTTPApi = None + _cookies: CookieJar = None + + config: Config + connection: Connection + +
+[docs] + @classmethod + async def create( + cls, + config: Config = None, + *, + user_data_dir: PathLike = None, + headless: bool = False, + browser_executable_path: PathLike = None, + browser_args: List[str] = None, + sandbox: bool = True, + host: str = None, + port: int = None, + **kwargs, + ) -> Browser: + """ + entry point for creating an instance + """ + if not config: + config = Config( + user_data_dir=user_data_dir, + headless=headless, + browser_executable_path=browser_executable_path, + browser_args=browser_args or [], + sandbox=sandbox, + host=host, + port=port, + **kwargs, + ) + instance = cls(config) + await instance.start() + return instance
+ + + def __init__(self, config: Config, **kwargs): + """ + constructor. to create a instance, use :py:meth:`Browser.create(...)` + + :param config: + """ + + try: + asyncio.get_running_loop() + except RuntimeError: + raise RuntimeError( + "{0} objects of this class are created using await {0}.create()".format( + self.__class__.__name__ + ) + ) + # weakref.finalize(self, self._quit, self) + self.config = config + + self.targets: List = [] + """current targets (all types""" + self.info = None + self._target = None + self._process = None + self._process_pid = None + self._keep_user_data_dir = None + self._is_updating = asyncio.Event() + self.connection: Connection = None + logger.debug("Session object initialized: %s" % vars(self)) + + @property + def websocket_url(self): + return self.info.webSocketDebuggerUrl + + @property + def main_tab(self) -> tab.Tab: + """returns the target which was launched with the browser""" + return sorted(self.targets, key=lambda x: x.type_ == "page", reverse=True)[0] + + @property + def tabs(self) -> List[tab.Tab]: + """returns the current targets which are of type "page" + :return: + """ + tabs = filter(lambda item: item.type_ == "page", self.targets) + return list(tabs) + + @property + def cookies(self) -> CookieJar: + if not self._cookies: + self._cookies = CookieJar(self) + return self._cookies + + @property + def stopped(self): + if self._process and self._process.returncode is None: + return False + return True + # return (self._process and self._process.returncode) or False + +
+[docs] + async def wait(self, time: Union[float, int] = 1) -> Browser: + """wait for <time> seconds. important to use, especially in between page navigation + + :param time: + :return: + """ + return await asyncio.sleep(time, result=self)
+ + + sleep = wait + """alias for wait""" + + def _handle_target_update( + self, + event: Union[ + cdp.target.TargetInfoChanged, + cdp.target.TargetDestroyed, + cdp.target.TargetCreated, + cdp.target.TargetCrashed, + ], + ): + """this is an internal handler which updates the targets when chrome emits the corresponding event""" + + if isinstance(event, cdp.target.TargetInfoChanged): + target_info = event.target_info + + current_tab = next( + filter( + lambda item: item.target_id == target_info.target_id, self.targets + ) + ) + current_target = current_tab.target + + if logger.getEffectiveLevel() <= 10: + changes = util.compare_target_info(current_target, target_info) + changes_string = "" + for change in changes: + key, old, new = change + changes_string += f"\n{key}: {old} => {new}\n" + logger.debug( + "target #%d has changed: %s" + % (self.targets.index(current_tab), changes_string) + ) + + current_tab.target = target_info + + elif isinstance(event, cdp.target.TargetCreated): + target_info: cdp.target.TargetInfo = event.target_info + from .tab import Tab + + new_target = Tab( + ( + f"ws://{self.config.host}:{self.config.port}" + f"/devtools/page" # all types are 'page' internally in chrome apparently + f"/{target_info.target_id}" + ), + target=target_info, + browser=self, + ) + + self.targets.append(new_target) + + logger.debug("target #%d created => %s", len(self.targets), new_target) + + elif isinstance(event, cdp.target.TargetDestroyed): + current_tab = next( + filter(lambda item: item.target_id == event.target_id, self.targets) + ) + logger.debug( + "target removed. id # %d => %s" + % (self.targets.index(current_tab), current_tab) + ) + self.targets.remove(current_tab) + +
+[docs] + async def get( + self, url="chrome://welcome", new_tab: bool = False, new_window: bool = False + ) -> tab.Tab: + """top level get. utilizes the first tab to retrieve given url. + + convenience function known from selenium. + this function handles waits/sleeps and detects when DOM events fired, so it's the safest + way of navigating. + + :param url: the url to navigate to + :param new_tab: open new tab + :param new_window: open new window + :return: Page + """ + if new_tab or new_window: + # creat new target using the browser session + target_id = await self.connection.send( + cdp.target.create_target( + url, new_window=new_window, enable_begin_frame_control=True + ) + ) + # get the connection matching the new target_id from our inventory + connection = next( + filter( + lambda item: item.type_ == "page" and item.target_id == target_id, + self.targets, + ) + ) + + else: + # first tab from browser.tabs + connection = next(filter(lambda item: item.type_ == "page", self.targets)) + # use the tab to navigate to new url + frame_id, loader_id, *_ = await connection.send(cdp.page.navigate(url)) + # update the frame_id on the tab + connection.frame_id = frame_id + + await connection.sleep(0.25) + return connection
+ + +
+[docs] + async def start(self=None) -> Browser: + """launches the actual browser""" + if not self: + warnings.warn("use ``await Browser.create()`` to create a new instance") + return + if self._process or self._process_pid: + if self._process.returncode is not None: + return await self.create(config=self.config) + warnings.warn("ignored! this call has no effect when already running.") + return + + # self.config.update(kwargs) + connect_existing = False + if self.config.host is not None and self.config.port is not None: + connect_existing = True + else: + self.config.host = "127.0.0.1" + self.config.port = util.free_port() + + if not connect_existing: + logger.debug( + "BROWSER EXECUTABLE PATH: %s", self.config.browser_executable_path + ) + if not pathlib.Path(self.config.browser_executable_path).exists(): + raise FileNotFoundError( + ( + """ + --------------------- + Could not determine browser executable. + --------------------- + Make sure your browser is installed in the default location (path). + If you are sure about the browser executable, you can specify it using + the `browser_executable_path='{}` parameter.""" + ).format( + "/path/to/browser/executable" + if is_posix + else "c:/path/to/your/browser.exe" + ) + ) + + if getattr(self.config, "_extensions", None): # noqa + self.config.add_argument( + "--load-extension=%s" + % ",".join(str(_) for _ in self.config._extensions) + ) # noqa + + exe = self.config.browser_executable_path + params = self.config() + + logger.info( + "starting\n\texecutable :%s\n\narguments:\n%s", exe, "\n\t".join(params) + ) + if not connect_existing: + self._process: asyncio.subprocess.Process = ( + await asyncio.create_subprocess_exec( + # self.config.browser_executable_path, + # *cmdparams, + exe, + *params, + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + close_fds=is_posix, + ) + ) + + self._process_pid = self._process.pid + logger.info("created process with pid %d " % self._process_pid) + + self._http = HTTPApi((self.config.host, self.config.port)) + + util.get_registered_instances().add(self) + + await asyncio.sleep(0.25) + for _ in range(5): + try: + self.info = ContraDict(await self._http.get("version"), silent=True) + except (Exception,): + if _ == 4: + logger.debug("could not start", exc_info=True) + await self.sleep(0.5) + else: + break + + if not self.info: + raise Exception( + ( + """ + --------------------- + Failed to connect to browser + --------------------- + One of the causes could be when you are running as root. 
+ In that case you need to pass no_sandbox=True + """ + ) + ) + + self.connection = Connection(self.info.webSocketDebuggerUrl, _owner=self) + + if self.config.autodiscover_targets: + logger.info("enabling autodiscover targets") + + # self.connection.add_handler( + # cdp.target.TargetInfoChanged, self._handle_target_update + # ) + # self.connection.add_handler( + # cdp.target.TargetCreated, self._handle_target_update + # ) + # self.connection.add_handler( + # cdp.target.TargetDestroyed, self._handle_target_update + # ) + # self.connection.add_handler( + # cdp.target.TargetCreated, self._handle_target_update + # ) + # + self.connection.handlers[cdp.target.TargetInfoChanged] = [ + self._handle_target_update + ] + self.connection.handlers[cdp.target.TargetCreated] = [ + self._handle_target_update + ] + self.connection.handlers[cdp.target.TargetDestroyed] = [ + self._handle_target_update + ] + self.connection.handlers[cdp.target.TargetCrashed] = [ + self._handle_target_update + ] + await self.connection.send(cdp.target.set_discover_targets(discover=True)) + await self
+ + # self.connection.handlers[cdp.inspector.Detached] = [self.stop] + # return self + +
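The ``connect_existing`` branch above is what the new ``host``/``port`` options enable: when both are set, no browser process is spawned and the client attaches to an already running, remotely debuggable browser. A hedged sketch, assuming a browser was started elsewhere with ``--remote-debugging-port=9222`` and that the module-level ``start()`` helper forwards these options to ``Config`` (the URL and port are only examples):

.. code-block:: python

    import nodriver as uc

    async def main():
        # attach to a browser that is already running with --remote-debugging-port=9222,
        # instead of launching a new process
        browser = await uc.start(host="127.0.0.1", port=9222)
        tab = await browser.get("https://example.com")
        print(tab.target.url)

    if __name__ == "__main__":
        uc.loop().run_until_complete(main())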
+[docs] + async def grant_all_permissions(self): + """ + grant permissions for: + accessibilityEvents + audioCapture + backgroundSync + backgroundFetch + clipboardReadWrite + clipboardSanitizedWrite + displayCapture + durableStorage + geolocation + idleDetection + localFonts + midi + midiSysex + nfc + notifications + paymentHandler + periodicBackgroundSync + protectedMediaIdentifier + sensors + storageAccess + topLevelStorageAccess + videoCapture + videoCapturePanTiltZoom + wakeLockScreen + wakeLockSystem + windowManagement + """ + permissions = list(cdp.browser.PermissionType) + permissions.remove(cdp.browser.PermissionType.FLASH) + permissions.remove(cdp.browser.PermissionType.CAPTURED_SURFACE_CONTROL) + await self.connection.send(cdp.browser.grant_permissions(permissions))
+ + +
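A minimal usage sketch for the permission helper above (``uc.start()`` is the module-level helper, not defined in this file; ``browser`` is the started instance):

.. code-block:: python

    browser = await uc.start()
    # grants every permission type except FLASH and CAPTURED_SURFACE_CONTROL,
    # as implemented above
    await browser.grant_all_permissions()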
+[docs] + async def tile_windows(self, windows=None, max_columns: int = 0): + import mss + import math + + m = mss.mss() + screen, screen_width, screen_height = 3 * (None,) + if m.monitors and len(m.monitors) >= 1: + screen = m.monitors[0] + screen_width = screen["width"] + screen_height = screen["height"] + if not screen or not screen_width or not screen_height: + warnings.warn("no monitors detected") + return + await self + distinct_windows = defaultdict(list) + + if windows: + tabs = windows + else: + tabs = self.tabs + for tab in tabs: + window_id, bounds = await tab.get_window() + distinct_windows[window_id].append(tab) + + num_windows = len(distinct_windows) + req_cols = max_columns or int(num_windows * (19 / 6)) + req_rows = int(num_windows / req_cols) + + while req_cols * req_rows < num_windows: + req_rows += 1 + + box_w = math.floor((screen_width / req_cols) - 1) + box_h = math.floor(screen_height / req_rows) + + distinct_windows_iter = iter(distinct_windows.values()) + grid = [] + for x in range(req_cols): + for y in range(req_rows): + num = x + y + try: + tabs = next(distinct_windows_iter) + except StopIteration: + continue + if not tabs: + continue + tab = tabs[0] + + try: + pos = [x * box_w, y * box_h, box_w, box_h] + grid.append(pos) + await tab.set_window_size(*pos) + except Exception: + logger.info( + "could not set window size. exception => ", exc_info=True + ) + continue + return grid
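``tile_windows`` relies on the optional ``mss`` package to read monitor geometry. A hedged sketch, assuming a few tabs were opened in their own windows (``new_window`` is the flag used elsewhere in the tab API; the URLs are examples):

.. code-block:: python

    await browser.get("https://example.com", new_window=True)
    await browser.get("https://example.org", new_window=True)

    # arrange all windows in a grid of at most 2 columns;
    # returns the [x, y, width, height] boxes that were applied
    grid = await browser.tile_windows(max_columns=2)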
+ + + async def _get_targets(self) -> List[cdp.target.TargetInfo]: + info = await self.connection.send(cdp.target.get_targets(), _is_update=True) + return info + +
+[docs] + async def update_targets(self): + targets: List[cdp.target.TargetInfo] + targets = await self._get_targets() + + for t in targets: + for existing_tab in self.targets: + existing_target = existing_tab.target + if existing_target.target_id == t.target_id: + existing_tab.target.__dict__.update(t.__dict__) + break + else: + + self.targets.append( + Connection( + ( + f"ws://{self.config.host}:{self.config.port}" + f"/devtools/page" # all types are 'page' somehow + f"/{t.target_id}" + ), + target=t, + _owner=self, + ) + ) + + await asyncio.sleep(0)
+ + + async def __aenter__(self): + + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if exc_type and exc_val: + raise exc_type(exc_val) + + def __iter__(self): + self._i = self.tabs.index(self.main_tab) + return self + + def __next__(self): + try: + return self.tabs[self._i] + except IndexError: + del self._i + raise StopIteration + except AttributeError: + del self._i + raise StopIteration + finally: + if hasattr(self, "_i"): + if self._i != len(self.tabs): + self._i += 1 + else: + del self._i + +
+[docs] + def stop(self): + try: + # asyncio.get_running_loop().create_task(self.connection.send(cdp.browser.close())) + + asyncio.get_event_loop().create_task(self.connection.aclose()) + logger.debug("closed the connection using get_event_loop().create_task()") + except RuntimeError: + if self.connection: + try: + # asyncio.run(self.connection.send(cdp.browser.close())) + asyncio.run(self.connection.aclose()) + logger.debug("closed the connection using asyncio.run()") + except Exception: + pass + + for _ in range(3): + try: + self._process.terminate() + logger.info( + "terminated browser with pid %d successfully" % self._process.pid + ) + break + except (Exception,): + try: + self._process.kill() + logger.info( + "killed browser with pid %d successfully" % self._process.pid + ) + break + except (Exception,): + try: + if hasattr(self, "browser_process_pid"): + os.kill(self._process_pid, 15) + logger.info( + "killed browser with pid %d using signal 15 successfully" + % self._process.pid + ) + break + except (TypeError,): + logger.info("typerror", exc_info=True) + pass + except (PermissionError,): + logger.info( + "browser already stopped, or no permission to kill. skip" + ) + pass + except (ProcessLookupError,): + logger.info("process lookup failure") + pass + except (Exception,): + raise + self._process = None + self._process_pid = None
+ + + def __await__(self): + # return ( asyncio.sleep(0)).__await__() + return self.update_targets().__await__() + + def __del__(self): + pass
+ + + +class CookieJar: + def __init__(self, browser: Browser): + self._browser = browser + # self._connection = connection + + async def get_all( + self, requests_cookie_format: bool = False + ) -> List[Union[cdp.network.Cookie, "http.cookiejar.Cookie"]]: + """ + get all cookies + + :param requests_cookie_format: when True, returns python http.cookiejar.Cookie objects, compatible with requests library and many others. + :type requests_cookie_format: bool + :return: + :rtype: + + """ + connection = None + for tab in self._browser.tabs: + if tab.closed: + continue + connection = tab + break + else: + connection = self._browser.connection + cookies = await connection.send(cdp.storage.get_cookies()) + if requests_cookie_format: + import requests.cookies + + return [ + requests.cookies.create_cookie( + name=c.name, + value=c.value, + domain=c.domain, + path=c.path, + expires=c.expires, + secure=c.secure, + ) + for c in cookies + ] + return cookies + + async def set_all(self, cookies: List[cdp.network.CookieParam]): + """ + set cookies + + :param cookies: list of cookies + :type cookies: + :return: + :rtype: + """ + connection = None + for tab in self._browser.tabs: + if tab.closed: + continue + connection = tab + break + else: + connection = self._browser.connection + cookies = await connection.send(cdp.storage.get_cookies()) + await connection.send(cdp.storage.set_cookies(cookies)) + + async def save(self, file: PathLike = ".session.dat", pattern: str = ".*"): + """ + save all cookies (or a subset, controlled by `pattern`) to a file to be restored later + + :param file: + :type file: + :param pattern: regex style pattern string. + any cookie that has a domain, key or value field which matches the pattern will be included. + default = ".*" (all) + + eg: the pattern "(cf|.com|nowsecure)" will include those cookies which: + - have a string "cf" (cloudflare) + - have ".com" in them, in either domain, key or value field. + - contain "nowsecure" + :type pattern: str + :return: + :rtype: + """ + import re + + pattern = re.compile(pattern) + save_path = pathlib.Path(file).resolve() + connection = None + for tab in self._browser.tabs: + if tab.closed: + continue + connection = tab + break + else: + connection = self._browser.connection + cookies = await connection.send(cdp.storage.get_cookies()) + # if not connection: + # return + # if not connection.websocket: + # return + # if connection.websocket.closed: + # return + cookies = await self.get_all(requests_cookie_format=False) + included_cookies = [] + for cookie in cookies: + for match in pattern.finditer(str(cookie.__dict__)): + logger.debug( + "saved cookie for matching pattern '%s' => (%s: %s)", + pattern.pattern, + cookie.name, + cookie.value, + ) + included_cookies.append(cookie) + break + pickle.dump(cookies, save_path.open("w+b")) + + async def load(self, file: PathLike = ".session.dat", pattern: str = ".*"): + """ + load all cookies (or a subset, controlled by `pattern`) from a file created by :py:meth:`~save_cookies`. + + :param file: + :type file: + :param pattern: regex style pattern string. + any cookie that has a domain, key or value field which matches the pattern will be included. + default = ".*" (all) + + eg: the pattern "(cf|.com|nowsecure)" will include those cookies which: + - have a string "cf" (cloudflare) + - have ".com" in them, in either domain, key or value field. 
+ - contain "nowsecure" + :type pattern: str + :return: + :rtype: + """ + import re + + pattern = re.compile(pattern) + save_path = pathlib.Path(file).resolve() + cookies = pickle.load(save_path.open("r+b")) + included_cookies = [] + connection = None + for tab in self._browser.tabs: + if tab.closed: + continue + connection = tab + break + else: + connection = self._browser.connection + for cookie in cookies: + for match in pattern.finditer(str(cookie.__dict__)): + included_cookies.append(cookie) + logger.debug( + "loaded cookie for matching pattern '%s' => (%s: %s)", + pattern.pattern, + cookie.name, + cookie.value, + ) + break + await connection.send(cdp.storage.set_cookies(included_cookies)) + + async def clear(self): + """ + clear current cookies + + note: this includes all open tabs/windows for this browser + + :return: + :rtype: + """ + connection = None + for tab in self._browser.tabs: + if tab.closed: + continue + connection = tab + break + else: + connection = self._browser.connection + cookies = await connection.send(cdp.storage.get_cookies()) + await connection.send(cdp.storage.clear_cookies()) + + +class HTTPApi: + def __init__(self, addr: Tuple[str, int]): + self.host, self.port = addr + self.api = "http://%s:%d" % (self.host, self.port) + + @classmethod + def from_target(cls, target: "Target"): + ws_url = urllib.parse.urlparse(target.websocket_url) + inst = cls((ws_url.hostname, ws_url.port)) + return inst + + async def get(self, endpoint: str): + return await self._request(endpoint) + + async def post(self, endpoint, data): + return await self._request(endpoint, data) + + async def _request(self, endpoint, method: str = "get", data: dict = None): + url = urllib.parse.urljoin( + self.api, f"json/{endpoint}" if endpoint else "/json" + ) + if data and method.lower() == "get": + raise ValueError("get requests cannot contain data") + if not url: + url = self.api + endpoint + request = urllib.request.Request(url) + request.method = method + request.data = None + if data: + request.data = json.dumps(data).encode("utf-8") + + response = await asyncio.get_running_loop().run_in_executor( + None, lambda: urllib.request.urlopen(request, timeout=10) + ) + return json.loads(response.read()) + + +atexit.register(util.deconstruct_browser) +
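The CookieJar above is typically reached through the Browser's ``cookies`` attribute. A hedged sketch of persisting a session between runs, given a started ``browser`` (the file name and pattern are only examples):

.. code-block:: python

    # first run: log in, then persist the cookies to disk
    await browser.cookies.save(".session.dat")

    # later run: restore only cookies whose domain/name/value matches the pattern
    await browser.cookies.load(".session.dat", pattern="example")

    # or convert them to http.cookiejar / requests-compatible cookies
    jar = await browser.cookies.get_all(requests_cookie_format=True)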
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/core/config.html b/docs/_build/html/_modules/nodriver/core/config.html
new file mode 100644
index 0000000..0a8bd29
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/core/config.html
@@ -0,0 +1,626 @@

Source code for nodriver.core.config

+import logging
+import os
+import pathlib
+import secrets
+import sys
+import tempfile
+from typing import Union, List, Optional
+from types import MethodType
+import zipfile
+from ._contradict import ContraDict
+
+__all__ = [
+    "Config",
+    "find_chrome_executable",
+    "temp_profile_dir",
+    "is_root",
+    "is_posix",
+    "PathLike",
+]
+
+logger = logging.getLogger(__name__)
+is_posix = sys.platform.startswith(("darwin", "cygwin", "linux", "linux2"))
+
+PathLike = Union[str, pathlib.Path]
+AUTO = None
+
+
+
+[docs] +class Config: + """ + Config object + """ + + def __init__( + self, + user_data_dir: Optional[PathLike] = AUTO, + headless: Optional[bool] = False, + browser_executable_path: Optional[PathLike] = AUTO, + browser_args: Optional[List[str]] = AUTO, + sandbox: Optional[bool] = True, + lang: Optional[str] = "en-US", + host: str = AUTO, + port: int = AUTO, + **kwargs: dict, + ): + """ + creates a config object. + Can be called without any arguments to generate a best-practice config, which is recommended. + + calling the object, eg : myconfig() , will return the list of arguments which + are provided to the browser. + + additional arguments can be added using the :py:obj:`~add_argument method` + + Instances of this class are usually not instantiated by end users. + + :param user_data_dir: the data directory to use + :param headless: set to True for headless mode + :param browser_executable_path: specify browser executable, instead of using autodetect + :param browser_args: forwarded to browser executable. eg : ["--some-chromeparam=somevalue", "some-other-param=someval"] + :param sandbox: disables sandbox + :param autodiscover_targets: use autodiscovery of targets + :param lang: language string to use other than the default "en-US,en;q=0.9" + :param kwargs: + + :type user_data_dir: PathLike + :type headless: bool + :type browser_executable_path: PathLike + :type browser_args: list[str] + :type sandbox: bool + :type lang: str + :type kwargs: dict + """ + + if not browser_args: + browser_args = [] + + if not user_data_dir: + self._user_data_dir = temp_profile_dir() + self._custom_data_dir = False + else: + self.user_data_dir = user_data_dir + + if not browser_executable_path: + browser_executable_path = find_chrome_executable() + + self._browser_args = browser_args + + self.browser_executable_path = browser_executable_path + self.headless = headless + self.sandbox = sandbox + self.host = host + self.port = port + self._extensions = [] + # when using posix-ish operating system and running as root + # you must use no_sandbox = True, which in case is corrected here + if is_posix and is_root() and sandbox: + logger.info("detected root usage, autoo disabling sandbox mode") + self.sandbox = False + + self.autodiscover_targets = True + self.lang = lang + + # other keyword args will be accessible by attribute + self.__dict__.update(kwargs) + super().__init__() + self._default_browser_args = [ + "--remote-allow-origins=*", + "--no-first-run", + "--no-service-autorun", + "--no-default-browser-check", + "--homepage=about:blank", + "--no-pings", + "--password-store=basic", + "--disable-infobars", + "--disable-breakpad", + "--disable-component-update", + "--disable-backgrounding-occluded-windows", + "--disable-renderer-backgrounding", + "--disable-background-networking", + "--disable-dev-shm-usage", + "--disable-features=IsolateOrigins,site-per-process", + "--disable-session-crashed-bubble", + ] + + @property + def browser_args(self): + return sorted(self._default_browser_args + self._browser_args) + + @property + def user_data_dir(self): + return self._user_data_dir + + @user_data_dir.setter + def user_data_dir(self, path: PathLike): + self._user_data_dir = str(path) + self._custom_data_dir = True + + @property + def uses_custom_data_dir(self) -> bool: + return self._custom_data_dir + +
+[docs] + def add_extension(self, extension_path: PathLike): + """ + adds an extension to load, you could point extension_path + to a folder (containing the manifest), or extension file (crx) + + :param extension_path: + :type extension_path: + :return: + :rtype: + """ + path = pathlib.Path(extension_path) + + if not path.exists(): + raise FileNotFoundError("could not find anything here: %s" % str(path)) + + if path.is_file(): + tf = tempfile.mkdtemp(prefix=f"extension_", suffix=secrets.token_hex(4)) + with zipfile.ZipFile(path, "r") as z: + z.extractall(tf) + self._extensions.append(tf) + + elif path.is_dir(): + for item in path.rglob("manifest.*"): + path = item.parent + self._extensions.append(path)
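A short sketch of loading an extension (the path is an example): a ``.crx`` file is unpacked into a temp directory, a folder is searched for its manifest. ``Browser.create`` comes from the browser module shown earlier:

.. code-block:: python

    conf = Config()
    conf.add_extension("/path/to/unpacked_extension")   # folder containing manifest.json
    browser = await Browser.create(config=conf)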
+ + + # def __getattr__(self, item): + # if item not in self.__dict__: + + def __call__(self): + # the host and port will be added when starting + # the browser, as by the time it starts, the port + # is probably already taken + args = self._default_browser_args.copy() + args += ["--user-data-dir=%s" % self.user_data_dir] + args += ["--disable-features=IsolateOrigins,site-per-process"] + args += ["--disable-session-crashed-bubble"] + + if self._browser_args: + args.extend([arg for arg in self._browser_args if arg not in args]) + if self.headless: + args.append("--headless=new") + if not self.sandbox: + args.append("--no-sandbox") + if self.host: + args.append("--remote-debugging-host=%s" % self.host) + if self.port: + args.append("--remote-debugging-port=%s" % self.port) + return args + +
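Calling the Config instance returns the final argument list handed to the browser; once ``host``/``port`` are known, the remote debugging flags are included as well:

.. code-block:: python

    conf = Config(headless=True, host="127.0.0.1", port=9222)
    for arg in conf():
        # e.g. --headless=new, --remote-debugging-host=127.0.0.1, --remote-debugging-port=9222
        print(arg)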
+[docs] + def add_argument(self, arg: str): + if any( + x in arg.lower() + for x in [ + "headless", + "data-dir", + "data_dir", + "no-sandbox", + "no_sandbox", + "lang", + ] + ): + raise ValueError( + '"%s" not allowed. please use one of the attributes of the Config object to set it' + % arg + ) + self._browser_args.append(arg)
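Flags that are not managed through a dedicated Config attribute can simply be appended; managed ones (headless, data dir, sandbox, lang) are rejected with a ``ValueError``. The flags below are just examples:

.. code-block:: python

    conf = Config()
    conf.add_argument("--window-size=1280,1024")
    conf.add_argument("--start-maximized")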
+ + + def __repr__(self): + s = f"{self.__class__.__name__}" + for k, v in ({**self.__dict__, **self.__class__.__dict__}).items(): + if k[0] == "_": + continue + if not v: + continue + if isinstance(v, property): + v = getattr(self, k) + if callable(v): + continue + s += f"\n\t{k} = {v}" + return s
+ + + # d = self.__dict__.copy() + # d.pop("browser_args") + # d["browser_args"] = self() + # return d + + +def is_root(): + """ + helper function to determine if user trying to launch chrome + under linux as root, which needs some alternative handling + :return: + :rtype: + """ + import ctypes, os + + try: + return os.getuid() == 0 + except AttributeError: + return ctypes.windll.shell32.IsUserAnAdmin() != 0 + + +def temp_profile_dir(): + """generate a temp dir (path)""" + path = os.path.normpath(tempfile.mkdtemp(prefix="uc_")) + return path + + +def find_chrome_executable(return_all=False): + """ + Finds the chrome, beta, canary, chromium executable + and returns the disk path + """ + candidates = [] + if is_posix: + for item in os.environ.get("PATH").split(os.pathsep): + for subitem in ( + "google-chrome", + "chromium", + "chromium-browser", + "chrome", + "google-chrome-stable", + ): + candidates.append(os.sep.join((item, subitem))) + if "darwin" in sys.platform: + candidates += [ + "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome", + "/Applications/Chromium.app/Contents/MacOS/Chromium", + ] + + else: + for item in map( + os.environ.get, + ("PROGRAMFILES", "PROGRAMFILES(X86)", "LOCALAPPDATA", "PROGRAMW6432"), + ): + if item is not None: + for subitem in ( + "Google/Chrome/Application", + "Google/Chrome Beta/Application", + "Google/Chrome Canary/Application", + ): + candidates.append(os.sep.join((item, subitem, "chrome.exe"))) + rv = [] + for candidate in candidates: + if os.path.exists(candidate) and os.access(candidate, os.X_OK): + logger.debug("%s is a valid candidate... " % candidate) + rv.append(candidate) + else: + logger.debug( + "%s is not a valid candidate because don't exist or not executable " + % candidate + ) + + winner = None + + if return_all and rv: + return rv + + if rv and len(rv) > 1: + # assuming the shortest path wins + winner = min(rv, key=lambda x: len(x)) + + elif len(rv) == 1: + winner = rv[0] + + if winner: + return os.path.normpath(winner) + + raise FileNotFoundError( + "could not find a valid chrome browser binary. please make sure chrome is installed." + "or use the keyword argument 'browser_executable_path=/path/to/your/browser' " + ) +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/core/connection.html b/docs/_build/html/_modules/nodriver/core/connection.html
new file mode 100644
index 0000000..241356c
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/core/connection.html
@@ -0,0 +1,918 @@

Source code for nodriver.core.connection

+from __future__ import annotations
+
+import asyncio
+import collections
+import functools
+import inspect
+import itertools
+import json
+import logging
+import sys
+import types
+from asyncio import iscoroutine, iscoroutinefunction
+from typing import (
+    Generator,
+    Union,
+    Awaitable,
+    Callable,
+    Any,
+    TypeVar,
+)
+
+import websockets
+
+from . import util
+from .. import cdp
+
+T = TypeVar("T")
+
+GLOBAL_DELAY = 0.005
+MAX_SIZE: int = 2**28
+PING_TIMEOUT: int = 900  # 15 minutes
+
+TargetType = Union[cdp.target.TargetInfo, cdp.target.TargetID]
+
+logger = logging.getLogger("uc.connection")
+
+
+class ProtocolException(Exception):
+    def __init__(self, *args, **kwargs):  # real signature unknown
+
+        self.message = None
+        self.code = None
+        self.args = args
+        if isinstance(args[0], dict):
+
+            self.message = args[0].get("message", None)  # noqa
+            self.code = args[0].get("code", None)
+
+        elif hasattr(args[0], "to_json"):
+
+            def serialize(obj, _d=0):
+                res = "\n"
+                for k, v in obj.items():
+                    space = "\t" * _d
+                    if isinstance(v, dict):
+                        res += f"{space}{k}: {serialize(v, _d + 1)}\n"
+                    else:
+                        res += f"{space}{k}: {v}\n"
+
+                return res
+
+            self.message = serialize(args[0].to_json())
+
+        else:
+            self.message = "| ".join(str(x) for x in args)
+
+    def __str__(self):
+        return f"{self.message} [code: {self.code}]" if self.code else f"{self.message}"
+
+
+class SettingClassVarNotAllowedException(PermissionError):
+    pass
+
+
+class Transaction(asyncio.Future):
+    __cdp_obj__: Generator = None
+
+    method: str = None
+    params: dict = None
+
+    id: int = None
+
+    def __init__(self, cdp_obj: Generator):
+        """
+        :param cdp_obj:
+        """
+        super().__init__()
+        self.__cdp_obj__ = cdp_obj
+        self.connection = None
+
+        self.method, *params = next(self.__cdp_obj__).values()
+        if params:
+            params = params.pop()
+        self.params = params
+
+    @property
+    def message(self):
+        return json.dumps({"method": self.method, "params": self.params, "id": self.id})
+
+    @property
+    def has_exception(self):
+        try:
+            if self.exception():
+                return True
+        except:  # noqa
+            return True
+        return False
+
+    def __call__(self, **response: dict):
+        """
+        parses the response message and marks the future
+        complete
+
+        :param response:
+        :return:
+        """
+
+        if "error" in response:
+            # set exception and bail out
+            return self.set_exception(ProtocolException(response["error"]))
+        try:
+            # try to parse the result according to the py cdp docs.
+            self.__cdp_obj__.send(response["result"])
+        except StopIteration as e:
+            # exception value holds the parsed response
+            return self.set_result(e.value)
+        raise ProtocolException("could not parse the cdp response:\n%s" % response)
+
+    def __repr__(self):
+        success = False if (self.done() and self.has_exception) else True
+        if self.done():
+            status = "finished"
+        else:
+            status = "pending"
+        fmt = (
+            f"<{self.__class__.__name__}\n\t"
+            f"method: {self.method}\n\t"
+            f"status: {status}\n\t"
+            f"success: {success}>"
+        )
+        return fmt
+
+
+class EventTransaction(Transaction):
+    event = None
+    value = None
+
+    def __init__(self, event_object):
+        try:
+            super().__init__(None)
+        except:
+            pass
+        self.set_result(event_object)
+        self.event = self.value = self.result()
+
+    def __repr__(self):
+        status = "finished"
+        success = False if self.exception() else True
+        event_object = self.result()
+        fmt = (
+            f"{self.__class__.__name__}\n\t"
+            f"event: {event_object.__class__.__module__}.{event_object.__class__.__name__}\n\t"
+            f"status: {status}\n\t"
+            f"success: {success}>"
+        )
+        return fmt
+
+
+class CantTouchThis(type):
+    def __setattr__(cls, attr, value):
+        """
+        :meta private:
+        """
+        if attr == "__annotations__":
+            # fix autodoc
+            return super().__setattr__(attr, value)
+        raise SettingClassVarNotAllowedException(
+            "\n".join(
+                (
+                    "don't set '%s' on the %s class directly, as those are shared with other objects.",
+                    "use `my_object.%s = %s`  instead",
+                )
+            )
+            % (attr, cls.__name__, attr, value)
+        )
+
+
+class Connection(metaclass=CantTouchThis):
+    attached: bool = None
+    websocket: websockets.WebSocketClientProtocol
+    _target: cdp.target.TargetInfo
+
+    def __init__(
+        self,
+        websocket_url: str,
+        target: cdp.target.TargetInfo = None,
+        _owner: "Browser" = None,
+        **kwargs,
+    ):
+        super().__init__()
+        self._target = target
+        self.__count__ = itertools.count(0)
+        self._owner = _owner
+        self.websocket_url: str = websocket_url
+        self.websocket = None
+        self.mapper = {}
+        self.handlers = collections.defaultdict(list)
+        self.recv_task = None
+        self.enabled_domains = []
+        self._last_result = []
+        self.listener: Listener = None
+        self.__dict__.update(**kwargs)
+
+    @property
+    def target(self) -> cdp.target.TargetInfo:
+        return self._target
+
+    @target.setter
+    def target(self, target: cdp.target.TargetInfo):
+        if not isinstance(target, cdp.target.TargetInfo):
+            raise TypeError(
+                "target must be set to a '%s' but got '%s"
+                % (cdp.target.TargetInfo.__name__, type(target).__name__)
+            )
+        self._target = target
+
+    @property
+    def closed(self):
+        if not self.websocket:
+            return True
+        return self.websocket.closed
+
+    def add_handler(
+        self,
+        event_type_or_domain: Union[type, types.ModuleType],
+        handler: Union[Callable, Awaitable],
+    ):
+        """
+        add a handler for given event
+
+        if event_type_or_domain is a module instead of a type, it will find all available events and add
+        the handler.
+
+        if you want to receive event updates (network traffic is also a stream of 'events'), you can add handlers for those events.
+        handlers can be regular callback functions or async coroutine functions (and also plain lambdas).
+        for example, if you want to watch the network traffic:
+
+        .. code-block::
+
+            page.add_handler(cdp.network.RequestWillBeSent, lambda event: print('network event => %s' % event.request))
+
+        the next time you make network traffic you will see your console print like crazy.
+
+        :param event_type_or_domain:
+        :type event_type_or_domain:
+        :param handler:
+        :type handler:
+        :return:
+        :rtype:
+        """
+        if isinstance(event_type_or_domain, types.ModuleType):
+            for name, obj in inspect.getmembers_static(event_type_or_domain):
+                if name.isupper():
+                    continue
+                if not name[0].isupper():
+                    continue
+                if type(obj) != type:
+                    continue
+                if inspect.isbuiltin(obj):
+                    continue
+                self.handlers[obj].append(handler)
+            return
+        self.handlers[event_type_or_domain].append(handler)
+
+    async def aopen(self, **kw):
+        """
+        opens the websocket connection. should not be called manually by users
+        :param kw:
+        :return:
+        """
+
+        if not self.websocket or self.websocket.closed:
+            try:
+                self.websocket = await websockets.connect(
+                    self.websocket_url,
+                    ping_timeout=PING_TIMEOUT,
+                    max_size=MAX_SIZE,
+                )
+                self.listener = Listener(self)
+            except (Exception,) as e:
+                logger.debug("exception during opening of websocket : %s", e)
+                if self.listener:
+                    self.listener.cancel()
+                raise
+        if not self.listener or not self.listener.running:
+            self.listener = Listener(self)
+            logger.debug("\n✅  opened websocket connection to %s", self.websocket_url)
+
+        # when a websocket connection is closed (either by error or on purpose)
+        # and reconnected, the registered event listeners (if any), should be
+        # registered again, so the browser sends those events
+
+        await self._register_handlers()
+
+    async def aclose(self):
+        """
+        closes the websocket connection. should not be called manually by users.
+        """
+        if self.websocket and not self.websocket.closed:
+            if self.listener and self.listener.running:
+                self.listener.cancel()
+                self.enabled_domains.clear()
+            await self.websocket.close()
+            logger.debug("\n❌ closed websocket connection to %s", self.websocket_url)
+
+    async def sleep(self, t: Union[int, float] = 0.25):
+        await self.update_target()
+        await asyncio.sleep(t)
+
+    async def wait(self, t: Union[int, float] = None):
+        """
+        waits until the event listener reports idle (no new events received in a certain timespan).
+        when `t` is provided, ensures waiting for `t` seconds, no matter what.
+
+        :param t:
+        :type t:
+        :return:
+        :rtype:
+        """
+        await self.update_target()
+        loop = asyncio.get_running_loop()
+        start_time = loop.time()
+        try:
+            if isinstance(t, (int, float)):
+                await asyncio.wait_for(self.listener.idle.wait(), timeout=t)
+                while (loop.time() - start_time) < t:
+                    await asyncio.sleep(0.1)
+            else:
+                await self.listener.idle.wait()
+        except asyncio.TimeoutError:
+            if isinstance(t, (int, float)):
+                # explicit time is given, which is now passed
+                # so bail out early
+                return
+        except AttributeError:
+            # no listener created yet
+            pass
+
+    def __getattr__(self, item):
+        """:meta private:"""
+        try:
+            return getattr(self.target, item)
+        except AttributeError:
+            raise
+
+    async def __aenter__(self):
+        """:meta private:"""
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        """:meta private:"""
+        await self.aclose()
+        if exc_type and exc_val:
+            raise exc_type(exc_val)
+
+    def __await__(self):
+        """
+        updates targets and wait for event listener to report idle.
+        idle is reported when no new events have been received for a short while (see Listener)
+        :return:
+        :rtype:
+        """
+        return self.wait().__await__()
+
+    async def update_target(self):
+        target_info: cdp.target.TargetInfo = await self.send(
+            cdp.target.get_target_info(self.target_id), _is_update=True
+        )
+        self.target = target_info
+
+    async def send(
+        self, cdp_obj: Generator[dict[str, Any], dict[str, Any], Any], _is_update=False
+    ) -> Any:
+        """
+        send a protocol command. commands are created using any of the cdp.<domain>.<method>() calls,
+        and this can also be used to send custom cdp commands.
+
+        :param cdp_obj: the generator object created by a cdp method
+
+        :param _is_update: internal flag
+            prevents an infinite loop by skipping the registration of handlers
+            when multiple calls to connection.send() are made
+        :return:
+        """
+        await self.aopen()
+        if not self.websocket or self.closed:
+            return
+        if not self.listener or not self.listener.running:
+            self.listener = Listener(self)
+        try:
+            tx = Transaction(cdp_obj)
+            tx.connection = self
+            if not self.mapper:
+                self.__count__ = itertools.count(0)
+            tx.id = next(self.__count__)
+            self.mapper.update({tx.id: tx})
+            if not _is_update:
+                await self._register_handlers()
+            await self.websocket.send(tx.message)
+            try:
+                return await tx
+            except ProtocolException as e:
+                e.message += f"\ncommand:{tx.method}\nparams:{tx.params}"
+                raise e
+        except Exception:
+            await self.aclose()
+
+    async def _register_handlers(self):
+        """
+        ensure that for current (event) handlers, the corresponding
+        domain is enabled in the protocol.
+
+        """
+        seen = []
+        # save a copy of current enabled domains in a variable
+        # domains will be removed from this variable
+        # if it is still needed according to the set handlers
+        # so at the end this variable will hold the domains that
+        # are not represented by handlers, and can be removed
+        enabled_domains = self.enabled_domains.copy()
+        for event_type in self.handlers.copy():
+            domain_mod = None
+            if len(self.handlers[event_type]) == 0:
+                self.handlers.pop(event_type)
+                continue
+            if isinstance(event_type, type):
+                domain_mod = util.cdp_get_module(event_type.__module__)
+            if domain_mod in self.enabled_domains:
+                # at this point, the domain is being used by a handler
+                # so remove that domain from temp variable 'enabled_domains' if present
+                if domain_mod in enabled_domains:
+                    enabled_domains.remove(domain_mod)
+                continue
+            elif domain_mod not in self.enabled_domains:
+                if domain_mod in (cdp.target, cdp.storage):
+                    # by default enabled
+                    continue
+                try:
+                    # we add this before sending the request, because it will
+                    # loop indefinitely
+                    logger.debug("registered %s", domain_mod)
+                    self.enabled_domains.append(domain_mod)
+
+                    await self.send(domain_mod.enable(), _is_update=True)
+
+                except:  # noqa - as broad as possible, we don't want an error before the "actual" request is sent
+                    logger.debug("", exc_info=True)
+                    try:
+                        self.enabled_domains.remove(domain_mod)
+                    except:  # noqa
+                        logger.debug("NOT GOOD", exc_info=True)
+                        continue
+                finally:
+                    continue
+        for ed in enabled_domains:
+            # we started with a copy of self.enabled_domains and removed a domain from this
+            # temp variable when we registered it or saw handlers for it.
+            # items still present at this point are unused and need removal
+            self.enabled_domains.remove(ed)
+
+
+class Listener:
+    def __init__(self, connection: Connection):
+        self.connection = connection
+        self.history = collections.deque()
+        self.max_history = 1000
+        self.task: asyncio.Future = None
+
+        # when in interactive mode, the loop is paused after each return
+        # and when the next call is made, it might still have to process some events
+        # from the previous call as well.
+
+        # while in "production" the loop keeps running constantly
+        # (and so events are continuously processed)
+
+        # therefore we should give it some breathing room in interactive mode
+        # and we can tighten that room when in production.
+
+        # /example/demo.py runs ~ 5 seconds faster, which is quite a lot.
+
+        is_interactive = getattr(sys, "ps1", sys.flags.interactive)
+        self._time_before_considered_idle = 0.10 if not is_interactive else 0.75
+        self.idle = asyncio.Event()
+        self.run()
+
+    def run(self):
+        self.task = asyncio.create_task(self.listener_loop())
+
+    @property
+    def time_before_considered_idle(self):
+        return self._time_before_considered_idle
+
+    @time_before_considered_idle.setter
+    def time_before_considered_idle(self, seconds: Union[int, float]):
+        self._time_before_considered_idle = seconds
+
+    def cancel(self):
+        if self.task and not self.task.cancelled():
+            self.task.cancel()
+
+    @property
+    def running(self):
+        if not self.task:
+            return False
+        if self.task.done():
+            return False
+        return True
+
+    async def listener_loop(self):
+
+        while True:
+            try:
+                msg = await asyncio.wait_for(
+                    self.connection.websocket.recv(), self.time_before_considered_idle
+                )
+            except asyncio.TimeoutError:
+                self.idle.set()
+                # breathe
+                # await asyncio.sleep(self.time_before_considered_idle / 10)
+                continue
+            except (Exception,) as e:
+                # break on any other exception
+                # which is mostly socket is closed or does not exist
+                # or is not allowed
+
+                logger.debug(
+                    "connection listener exception while reading websocket:\n%s", e
+                )
+                break
+
+            if not self.running:
+                # if we have been cancelled or otherwise stopped running
+                # break this loop
+                break
+
+            # since we are at this point, we are not "idle" anymore.
+            self.idle.clear()
+
+            message = json.loads(msg)
+            if "id" in message:
+                # response to our command
+                if message["id"] in self.connection.mapper:
+                    # get the corresponding Transaction
+
+                    # thanks to zxsleebu for discovering the memory leak
+                    # pop to prevent memory leaks
+
+                    tx = self.connection.mapper.pop(message["id"])
+                    logger.debug("got answer for %s (message_id:%d)", tx, message["id"])
+
+                    # complete the transaction, which is a Future object
+                    # and thus will return to anyone awaiting it.
+                    tx(**message)
+            else:
+                # probably an event
+                try:
+                    event = cdp.util.parse_json_event(message)
+                    event_tx = EventTransaction(event)
+                    if not self.connection.mapper:
+                        self.connection.__count__ = itertools.count(0)
+                    event_tx.id = next(self.connection.__count__)
+                    self.connection.mapper[event_tx.id] = event_tx
+                except KeyError as e:
+                    logger.info("some lousy KeyError %s" % e, exc_info=True)
+                    continue
+                except Exception as e:
+                    logger.info(
+                        "%s: %s  during parsing of json from event : %s"
+                        % (type(e).__name__, e.args, message),
+                        exc_info=True,
+                    )
+                    continue
+                try:
+                    if type(event) in self.connection.handlers:
+                        callbacks = self.connection.handlers[type(event)]
+                    else:
+                        continue
+                    if not len(callbacks):
+                        continue
+                    for callback in callbacks:
+                        try:
+                            if iscoroutinefunction(callback) or iscoroutine(callback):
+                                await callback(event)
+                            else:
+                                callback(event)
+                        except Exception as e:
+                            logger.warning(
+                                "exception in callback %s for event %s => %s",
+                                callback,
+                                event.__class__.__name__,
+                                e,
+                                exc_info=True,
+                            )
+                            raise
+                except asyncio.CancelledError:
+                    break
+                except Exception:
+                    raise
+                continue
+
+    def __repr__(self):
+        s_idle = "[idle]" if self.idle.is_set() else "[busy]"
+        s_cache_length = f"[cache size: {len(self.history)}]"
+        s_running = f"[running: {self.running}]"
+        s = f"{self.__class__.__name__} {s_running} {s_idle} {s_cache_length}>"
+        return s
+
+
+
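To tie the connection pieces together, here is a hedged usage sketch: handlers can be plain callables or coroutines, and any ``cdp.<domain>.<method>()`` generator can be passed to ``send()``. ``uc.start()``, ``browser.get()`` and ``Tab`` come from the rest of the package (a Tab is a Connection subclass), not from this file:

.. code-block:: python

    import nodriver as uc
    from nodriver import cdp

    async def log_request(event: cdp.network.RequestWillBeSent):
        print("request:", event.request.url)

    async def main():
        browser = await uc.start()
        tab = await browser.get("https://example.com")

        # register for a single event type; passing a whole module
        # (e.g. cdp.network) registers the handler for every event in that domain
        tab.add_handler(cdp.network.RequestWillBeSent, log_request)

        # send() accepts any cdp.<domain>.<method>() generator
        version_info = await tab.send(cdp.browser.get_version())

        # wait at least 2 seconds, and until the event listener reports idle
        await tab.wait(2)

    if __name__ == "__main__":
        uc.loop().run_until_complete(main())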
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/core/element.html b/docs/_build/html/_modules/nodriver/core/element.html
new file mode 100644
index 0000000..3f395f8
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/core/element.html
@@ -0,0 +1,1548 @@

Source code for nodriver.core.element

+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import pathlib
+import secrets
+import typing
+
+from . import util
+from ._contradict import ContraDict
+from .config import PathLike
+from .. import cdp
+
+logger = logging.getLogger(__name__)
+
+if typing.TYPE_CHECKING:
+    from .tab import Tab
+
+
+def create(node: cdp.dom.Node, tab: Tab, tree: typing.Optional[cdp.dom.Node] = None):
+    """
+    factory for Elements
+    this is used with Tab.query_selector(_all), since we already have the tree,
+    we don't need to fetch it for every single element.
+
+    :param node: cdp dom node representation
+    :type node: cdp.dom.Node
+    :param tab: the target object to which this element belongs
+    :type tab: Tab
+    :param tree: [Optional] the full node tree to which <node> belongs, enhances performance.
+                when not provided, you need to call `await elem.update()` before using .children / .parent
+    :type tree:
+    """
+
+    elem = Element(node, tab, tree)
+
+    return elem
+
+
+
+[docs] +class Element: + def __init__(self, node: cdp.dom.Node, tab: Tab, tree: cdp.dom.Node = None): + """ + Represents an (HTML) DOM Element + + :param node: cdp dom node representation + :type node: cdp.dom.Node + :param tab: the target object to which this element belongs + :type tab: Tab + """ + if not node: + raise Exception("node cannot be None") + self._tab = tab + # if node.node_name == 'IFRAME': + # self._node = node.content_document + # else: + self._node = node + self._tree = tree + self._parent = None + self._remote_object = None + self._attrs = ContraDict(silent=True) + self._make_attrs() + + @property + def tag(self): + if self.node_name: + return self.node_name.lower() + + @property + def tag_name(self): + return self.tag + + @property + def node_id(self): + return self.node.node_id + + @property + def backend_node_id(self): + return self.node.backend_node_id + + @property + def node_type(self): + return self.node.node_type + + @property + def node_name(self): + return self.node.node_name + + @property + def local_name(self): + return self.node.local_name + + @property + def node_value(self): + return self.node.node_value + + @property + def parent_id(self): + return self.node.parent_id + + @property + def child_node_count(self): + return self.node.child_node_count + + @property + def attributes(self): + return self.node.attributes + + @property + def document_url(self): + return self.node.document_url + + @property + def base_url(self): + return self.node.base_url + + @property + def public_id(self): + return self.node.public_id + + @property + def system_id(self): + return self.node.system_id + + @property + def internal_subset(self): + return self.node.internal_subset + + @property + def xml_version(self): + return self.node.xml_version + + @property + def value(self): + return self.node.value + + @property + def pseudo_type(self): + return self.node.pseudo_type + + @property + def pseudo_identifier(self): + return self.node.pseudo_identifier + + @property + def shadow_root_type(self): + return self.node.shadow_root_type + + @property + def frame_id(self): + return self.node.frame_id + + @property + def content_document(self): + return self.node.content_document + + @property + def shadow_roots(self): + return self.node.shadow_roots + + @property + def template_content(self): + return self.node.template_content + + @property + def pseudo_elements(self): + return self.node.pseudo_elements + + @property + def imported_document(self): + return self.node.imported_document + + @property + def distributed_nodes(self): + return self.node.distributed_nodes + + @property + def is_svg(self): + return self.node.is_svg + + @property + def compatibility_mode(self): + return self.node.compatibility_mode + + @property + def assigned_slot(self): + return self.node.assigned_slot + + @property + def tab(self): + return self._tab + + def __getattr__(self, item): + # if attribute is not found on the element python object + # check if it may be present in the element attributes (eg, href=, src=, alt=) + # returns None when attribute is not found + # instead of raising AttributeError + x = getattr(self.attrs, item, None) + if x: + return x + + # x = getattr(self.node, item, None) + # + # return x + + def __setattr__(self, key, value): + if key[0] != "_": + if key[1:] not in vars(self).keys(): + # we probably deal with an attribute of + # the html element, so forward it + self.attrs.__setattr__(key, value) + return + # we probably deal with an attribute of + # the python object + 
super().__setattr__(key, value) + + def __setitem__(self, key, value): + if key[0] != "_": + if key[1:] not in vars(self).keys(): + # we probably deal with an attribute of + # the html element, so forward it + self.attrs[key] = value + + def __getitem__(self, item): + # we probably deal with an attribute of + # the html element, so forward it + return self.attrs.get(item, None) + +
+[docs] + async def save_to_dom(self): + """ + saves element to dom + :return: + :rtype: + """ + self._remote_object = await self._tab.send( + cdp.dom.resolve_node(backend_node_id=self.backend_node_id) + ) + await self._tab.send(cdp.dom.set_outer_html(self.node_id, outer_html=str(self))) + await self.update()
+ + +
+[docs] + async def remove_from_dom(self): + """removes the element from dom""" + await self.update() # ensure we have latest node_id + node = util.filter_recurse( + self._tree, lambda node: node.backend_node_id == self.backend_node_id + ) + if node: + await self.tab.send(cdp.dom.remove_node(node.node_id))
+ + # self._tree = util.remove_from_tree(self.tree, self.node) + +
+[docs] + async def update(self, _node=None): + """ + updates element to retrieve more properties. for example this enables + :py:obj:`~children` and :py:obj:`~parent` attributes. + + also resolves js opbject which is stored object in :py:obj:`~remote_object` + + usually you will get element nodes by the usage of + + :py:meth:`Tab.query_selector_all()` + + :py:meth:`Tab.find_elements_by_text()` + + those elements are already updated and you can browse through children directly. + + The reason for a seperate call instead of doing it at initialization, + is because when you are retrieving 100+ elements this becomes quite expensive. + + therefore, it is not advised to call this method on a bunch of blocks (100+) at the same time. + + :return: + :rtype: + """ + if _node: + doc = _node + # self._node = _node + # self._children.clear() + self._parent = None + else: + doc = await self._tab.send(cdp.dom.get_document(-1, True)) + self._parent = None + # if self.node_name != "IFRAME": + updated_node = util.filter_recurse( + doc, lambda n: n.backend_node_id == self._node.backend_node_id + ) + if updated_node: + logger.debug("node seems changed, and has now been updated.") + self._node = updated_node + self._tree = doc + + self._remote_object = await self._tab.send( + cdp.dom.resolve_node(backend_node_id=self._node.backend_node_id) + ) + self.attrs.clear() + self._make_attrs() + if self.node_name != "IFRAME": + parent_node = util.filter_recurse( + doc, lambda n: n.node_id == self.node.parent_id + ) + if not parent_node: + # could happen if node is for example <html> + return self + self._parent = create(parent_node, tab=self._tab, tree=self._tree) + return self
+ + + @property + def node(self): + return self._node + + @property + def tree(self) -> cdp.dom.Node: + return self._tree + # raise RuntimeError("you should first call `await update()` on this object to populate it's tree") + + @tree.setter + def tree(self, tree: cdp.dom.Node): + self._tree = tree + + @property + def attrs(self): + """ + attributes are stored here, however, you can set them directly on the element object as well. + :return: + :rtype: + """ + return self._attrs + + @property + def parent(self) -> typing.Union[Element, None]: + """ + get the parent element (node) of current element(node) + :return: + :rtype: + """ + if not self.tree: + raise RuntimeError("could not get parent since the element has no tree set") + parent_node = util.filter_recurse( + self.tree, lambda n: n.node_id == self.parent_id + ) + if not parent_node: + return None + parent_element = create(parent_node, tab=self._tab, tree=self.tree) + return parent_element + + @property + def children(self) -> typing.Union[typing.List[Element], str]: + """ + returns the elements' children. those children also have a children property + so you can browse through the entire tree as well. + :return: + :rtype: + """ + _children = [] + if self._node.node_name == "IFRAME": + # iframes are not exact the same as other nodes + # the children of iframes are found under + # the .content_document property, which is of more + # use than the node itself + frame = self._node.content_document + if not frame.child_node_count: + return [] + for child in frame.children: + child_elem = create(child, self._tab, frame) + if child_elem: + _children.append(child_elem) + # self._node = frame + return _children + elif not self.node.child_node_count: + return [] + if self.node.children: + for child in self.node.children: + child_elem = create(child, self._tab, self.tree) + if child_elem: + _children.append(child_elem) + return _children + + @property + def remote_object(self) -> cdp.runtime.RemoteObject: + return self._remote_object + + @property + def object_id(self) -> cdp.runtime.RemoteObjectId: + try: + return self.remote_object.object_id + except AttributeError: + pass + +
+[docs] + async def click(self): + """ + Click the element. + + :return: + :rtype: + """ + self._remote_object = await self._tab.send( + cdp.dom.resolve_node(backend_node_id=self.backend_node_id) + ) + arguments = [cdp.runtime.CallArgument(object_id=self._remote_object.object_id)] + await self.flash(0.25) + await self._tab.send( + cdp.runtime.call_function_on( + "(el) => el.click()", + object_id=self._remote_object.object_id, + arguments=arguments, + await_promise=True, + user_gesture=True, + return_by_value=True, + ) + )
+ + +
+[docs] + async def get_js_attributes(self): + return ContraDict( + json.loads( + await self.apply( + """ + function (e) { + let o = {} + for(let k in e){ + o[k] = e[k] + } + return JSON.stringify(o) + } + """ + ) + ) + )
+ + + def __await__(self): + return self.update().__await__() + + def __call__(self, js_method): + """ + calling the element object will call a js method on the object + eg, element.play() in case of a video element, it will call .play() + :param js_method: + :type js_method: + :return: + :rtype: + """ + return self.apply(f"(e) => e['{js_method}']()") + +
+[docs] + async def apply(self, js_function, return_by_value=True): + """ + apply javascript to this element. the given js_function string should accept the js element as parameter, + and can be a arrow function, or function declaration. + eg: + - '(elem) => { elem.value = "blabla"; consolelog(elem); alert(JSON.stringify(elem); } ' + - 'elem => elem.play()' + - function myFunction(elem) { alert(elem) } + + :param js_function: the js function definition which received this element. + :type js_function: str + :param return_by_value: + :type return_by_value: + :return: + :rtype: + """ + self._remote_object = await self._tab.send( + cdp.dom.resolve_node(backend_node_id=self.backend_node_id) + ) + result: typing.Tuple[cdp.runtime.RemoteObject, typing.Any] = ( + await self._tab.send( + cdp.runtime.call_function_on( + js_function, + object_id=self._remote_object.object_id, + arguments=[ + cdp.runtime.CallArgument( + object_id=self._remote_object.object_id + ) + ], + return_by_value=True, + user_gesture=True, + ) + ) + ) + if result and result[0]: + if return_by_value: + return result[0].value + return result[0] + elif result[1]: + return result[1]
+ + +
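A short sketch of ``apply()``: the supplied JavaScript receives the element itself, and the result comes back by value unless ``return_by_value=False``. ``tab.select`` and the selector are assumptions taken from the tab API, used only for illustration:

.. code-block:: python

    elem = await tab.select("input[name=q]")   # example selector

    # arrow function: read a property, returned by value
    name = await elem.apply("(el) => el.name")

    # function body: mutate the element in the page
    await elem.apply("(el) => { el.value = 'nodriver'; el.focus(); }")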
+[docs] + async def get_position(self, abs=False) -> Position: + if not self.parent or not self.object_id: + self._remote_object = await self._tab.send( + cdp.dom.resolve_node(backend_node_id=self.backend_node_id) + ) + # await self.update() + try: + quads = await self.tab.send( + cdp.dom.get_content_quads(object_id=self.remote_object.object_id) + ) + if not quads: + raise Exception("could not find position for %s " % self) + pos = Position(quads[0]) + if abs: + scroll_y = (await self.tab.evaluate("window.scrollY")).value + scroll_x = (await self.tab.evaluate("window.scrollX")).value + abs_x = pos.left + scroll_x + (pos.width / 2) + abs_y = pos.top + scroll_y + (pos.height / 2) + pos.abs_x = abs_x + pos.abs_y = abs_y + return pos + except IndexError: + logger.debug( + "no content quads for %s. mostly caused by element which is not 'in plain sight'" + % self + )
+ + +
+[docs] + async def mouse_click( + self, + button: str = "left", + buttons: typing.Optional[int] = 1, + modifiers: typing.Optional[int] = 0, + hold: bool = False, + _until_event: typing.Optional[type] = None, + ): + """native click (on element) . note: this likely does not work atm, use click() instead + + :param button: str (default = "left") + :param buttons: which button (default 1 = left) + :param modifiers: *(Optional)* Bit field representing pressed modifier keys. + Alt=1, Ctrl=2, Meta/Command=4, Shift=8 (default: 0). + :param _until_event: internal. event to wait for before returning + :return: + + """ + try: + center = (await self.get_position()).center + except AttributeError: + return + if not center: + logger.warning("could not calculate box model for %s", self) + return + + logger.debug("clicking on location %.2f, %.2f" % center) + + await asyncio.gather( + self._tab.send( + cdp.input_.dispatch_mouse_event( + "mousePressed", + x=center[0], + y=center[1], + modifiers=modifiers, + button=cdp.input_.MouseButton(button), + buttons=buttons, + click_count=1, + ) + ), + self._tab.send( + cdp.input_.dispatch_mouse_event( + "mouseReleased", + x=center[0], + y=center[1], + modifiers=modifiers, + button=cdp.input_.MouseButton(button), + buttons=buttons, + click_count=1, + ) + ), + ) + try: + await self.flash() + except: # noqa + pass
+ + +
+[docs] + async def mouse_move(self): + """moves mouse (not click), to element position. when an element has an + hover/mouseover effect, this would trigger it""" + try: + center = (await self.get_position()).center + except AttributeError: + logger.debug("did not find location for %s", self) + return + logger.debug( + "mouse move to location %.2f, %.2f where %s is located", *center, self + ) + await self._tab.send( + cdp.input_.dispatch_mouse_event("mouseMoved", x=center[0], y=center[1]) + ) + await self._tab.sleep(0.05) + await self._tab.send( + cdp.input_.dispatch_mouse_event("mouseReleased", x=center[0], y=center[1]) + )
+ + +
+[docs] + async def mouse_drag( + self, + destination: typing.Union[Element, typing.Tuple[int, int]], + relative: bool = False, + steps: int = 1, + ): + """ + drag an element to another element or target coordinates. dragging of elements should be supported by the site of course + + + :param destination: another element where to drag to, or a tuple (x,y) of ints representing coordinate + :type destination: Element or coordinate as x,y tuple + + :param relative: when True, treats coordinate as relative. for example (-100, 200) will move left 100px and down 200px + :type relative: + + :param steps: move in <steps> points, this could make it look more "natural" (default 1), + but also a lot slower. + for very smooth action use 50-100 + :type steps: int + :return: + :rtype: + """ + try: + start_point = (await self.get_position()).center + except AttributeError: + return + if not start_point: + logger.warning("could not calculate box model for %s", self) + return + end_point = None + if isinstance(destination, Element): + try: + end_point = (await destination.get_position()).center + except AttributeError: + return + if not end_point: + logger.warning("could not calculate box model for %s", destination) + return + elif isinstance(destination, (tuple, list)): + if relative: + end_point = ( + start_point[0] + destination[0], + start_point[1] + destination[1], + ) + else: + end_point = destination + + await self._tab.send( + cdp.input_.dispatch_mouse_event( + "mousePressed", + x=start_point[0], + y=start_point[1], + button=cdp.input_.MouseButton("left"), + ) + ) + + steps = 1 if (not steps or steps < 1) else steps + if steps == 1: + await self._tab.send( + cdp.input_.dispatch_mouse_event( + "mouseMoved", + x=end_point[0], + y=end_point[1], + ) + ) + elif steps > 1: + # probably the worst waay of calculating this. but couldn't think of a better solution today. + step_size_x = (end_point[0] - start_point[0]) / steps + step_size_y = (end_point[1] - start_point[1]) / steps + pathway = [ + (start_point[0] + step_size_x * i, start_point[1] + step_size_y * i) + for i in range(steps + 1) + ] + + for point in pathway: + await self._tab.send( + cdp.input_.dispatch_mouse_event( + "mouseMoved", + x=point[0], + y=point[1], + ) + ) + await asyncio.sleep(0) + + await self._tab.send( + cdp.input_.dispatch_mouse_event( + type_="mouseReleased", + x=end_point[0], + y=end_point[1], + button=cdp.input_.MouseButton("left"), + ) + )
+ + +
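+
+# Usage sketch for mouse_drag() (illustration only, not part of the library source).
+# Assumes `tab` is a nodriver Tab and the page supports dragging the selected element;
+# the "#slider-handle" selector is a made-up example.
+async def drag_example(tab):
+    handle = await tab.select("#slider-handle")
+    # move 150px right of the element's current center, in 50 small steps
+    await handle.mouse_drag((150, 0), relative=True, steps=50)
+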
+[docs] + async def scroll_into_view(self): + """scrolls element into view""" + try: + await self.tab.send( + cdp.dom.scroll_into_view_if_needed(backend_node_id=self.backend_node_id) + ) + except Exception as e: + logger.debug("could not scroll into view: %s", e) + return
+ + + # await self.apply("""(el) => el.scrollIntoView(false)""") + +
+[docs] + async def clear_input(self, _until_event: type = None): + """clears an input field""" + return await self.apply('function (element) { element.value = "" } ')
+ + +
+[docs]
+    async def send_keys(self, text: str):
+        """
+        send text to an input field, or any other html element.
+
+        hint: if you ever get stuck because :py:meth:`~click`
+        does not work, sending the keystroke \\n or \\r\\n or a spacebar works wonders!
+
+        :param text: text to send
+        :return: None
+        """
+        await self.apply("(elem) => elem.focus()")
+        for char in list(text):
+            await self._tab.send(cdp.input_.dispatch_key_event("char", text=char))
+ + +
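+
+# Usage sketch for send_keys() (illustration only, not part of the library source).
+# Assumes `tab` is a nodriver Tab and the page has a text input matching the selector.
+async def send_keys_example(tab):
+    field = await tab.select("input[name=q]")
+    await field.clear_input()
+    await field.send_keys("nodriver")
+    # "\n" often submits the surrounding form, as the docstring above suggests
+    await field.send_keys("\n")
+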
+[docs]
+    async def send_file(self, *file_paths: PathLike):
+        """
+        some form inputs require a file (upload); a full path needs to be provided.
+        this method sends 1 or more file(s) to the input field.
+
+        needless to say, make sure the field accepts multiple files if you want to send more than one,
+        otherwise the browser might crash.
+
+        example :
+        `await fileinputElement.send_file('c:/temp/image.png', 'c:/users/myuser/lol.gif')`
+
+        """
+        file_paths = [str(p) for p in file_paths]
+        await self._tab.send(
+            cdp.dom.set_file_input_files(
+                files=[*file_paths],
+                backend_node_id=self.backend_node_id,
+                object_id=self.object_id,
+            )
+        )
+ + +
+[docs] + async def focus(self): + """focus the current element. often useful in form (select) fields""" + return await self.apply("(element) => element.focus()")
+ + +
+[docs] + async def select_option(self): + """for form (select) fields. when you have queried the options you can call this method on the option object + + calling :func:`option.select_option()` will use that option as selected value. + does not work in all cases. + + """ + if self.node_name == "OPTION": + return await self.apply("(o) => o.selected = true")
+ + +
+[docs] + async def set_value(self, value): + await self._tab.send(cdp.dom.set_node_value(node_id=self.node_id, value=value))
+ + +
+[docs]
+    async def set_text(self, value):
+        if self.node_type != 3:
+            if self.child_node_count == 1:
+                child_node = self.children[0]
+                await child_node.set_text(value)
+                await self.update()
+                return
+            else:
+                raise RuntimeError("can only set the value of text nodes")
+        await self.update()
+        await self._tab.send(cdp.dom.set_node_value(node_id=self.node_id, value=value))
+ + +
+[docs] + async def get_html(self): + return await self._tab.send( + cdp.dom.get_outer_html(backend_node_id=self.backend_node_id) + )
+
+
+    @property
+    def text(self) -> str:
+        """
+        gets the text contents of this element.
+        note: this includes text in the form of script content, as those are also just 'text nodes'
+
+        :return:
+        :rtype:
+        """
+        text_node = util.filter_recurse(self.node, lambda n: n.node_type == 3)
+        if text_node:
+            return text_node.node_value
+        return ""
+
+    @property
+    def text_all(self):
+        """
+        gets the text contents of this element and its children, as a concatenated string.
+        note: this includes text in the form of script content, as those are also just 'text nodes'
+        :return:
+        :rtype:
+        """
+        text_nodes = util.filter_recurse_all(self.node, lambda n: n.node_type == 3)
+        return " ".join([n.node_value for n in text_nodes])
+
+
+[docs] + async def query_selector_all(self, selector: str): + """ + like js querySelectorAll() + """ + await self.update() + return await self.tab.query_selector_all(selector, _node=self)
+ + +
+[docs] + async def query_selector(self, selector): + """ + like js querySelector() + """ + + await self.update() + return await self.tab.query_selector(selector, self)
+ + + # +
+[docs] + async def save_screenshot( + self, + filename: typing.Optional[PathLike] = "auto", + format: typing.Optional[str] = "jpeg", + scale: typing.Optional[typing.Union[int, float]] = 1, + ): + """ + Saves a screenshot of this element (only) + This is not the same as :py:obj:`Tab.save_screenshot`, which saves a "regular" screenshot + + When the element is hidden, or has no size, or is otherwise not capturable, a RuntimeError is raised + + :param filename: uses this as the save path + :type filename: PathLike + :param format: jpeg or png (defaults to jpeg) + :type format: str + :param scale: the scale of the screenshot, eg: 1 = size as is, 2 = double, 0.5 is half + :return: the path/filename of saved screenshot + :rtype: str + """ + + import urllib.parse + import datetime + import base64 + + pos = await self.get_position() + if not pos: + raise RuntimeError( + "could not determine position of element. probably because it's not in view, or hidden" + ) + viewport = pos.to_viewport(scale) + path = None + await self.tab.sleep() + if not filename or filename == "auto": + parsed = urllib.parse.urlparse(self.tab.target.url) + parts = parsed.path.split("/") + last_part = parts[-1] + last_part = last_part.rsplit("?", 1)[0] + dt_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + candidate = f"{parsed.hostname}__{last_part}_{dt_str}" + ext = "" + if format.lower() in ["jpg", "jpeg"]: + ext = ".jpg" + format = "jpeg" + elif format.lower() in ["png"]: + ext = ".png" + format = "png" + path = pathlib.Path(candidate + ext) + else: + path = pathlib.Path(filename) + + path.parent.mkdir(parents=True, exist_ok=True) + data = await self._tab.send( + cdp.page.capture_screenshot( + format, clip=viewport, capture_beyond_viewport=True + ) + ) + if not data: + from .connection import ProtocolException + + raise ProtocolException( + "could not take screenshot. most possible cause is the page has not finished loading yet." + ) + + data_bytes = base64.b64decode(data) + if not path: + raise RuntimeError("invalid filename or path: '%s'" % filename) + path.write_bytes(data_bytes) + return str(path)
+ + +
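+
+# Usage sketch for Element.save_screenshot() (illustration only, not part of the
+# library source). Assumes `tab` is a nodriver Tab and the image is visible
+# (hidden or zero-size elements raise a RuntimeError, see the docstring above).
+async def element_screenshot_example(tab):
+    logo = await tab.select("img")
+    path = await logo.save_screenshot("logo.png", format="png", scale=2)
+    print("saved to", path)
+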
+[docs] + async def flash(self, duration: typing.Union[float, int] = 0.5): + """ + displays for a short time a red dot on the element (only if the element itself is visible) + + :param coords: x,y + :type coords: x,y + :param duration: seconds (default 0.5) + :type duration: + :return: + :rtype: + """ + from .connection import ProtocolException + + if not self.remote_object: + try: + self._remote_object = await self.tab.send( + cdp.dom.resolve_node(backend_node_id=self.backend_node_id) + ) + except ProtocolException: + return + try: + pos = await self.get_position() + + except (Exception,): + logger.debug("flash() : could not determine position") + return + + style = ( + "position:absolute;z-index:99999999;padding:0;margin:0;" + "left:{:.1f}px; top: {:.1f}px;" + "opacity:1;" + "width:16px;height:16px;border-radius:50%;background:red;" + "animation:show-pointer-ani {:.2f}s ease 1;" + ).format( + pos.center[0] - 8, # -8 to account for drawn circle itself (w,h) + pos.center[1] - 8, + duration, + ) + script = ( + """ + (targetElement) => {{ + var css = document.styleSheets[0]; + for( let css of [...document.styleSheets]) {{ + try {{ + css.insertRule(` + @keyframes show-pointer-ani {{ + 0% {{ opacity: 1; transform: scale(2, 2);}} + 25% {{ transform: scale(5,5) }} + 50% {{ transform: scale(3, 3);}} + 75%: {{ transform: scale(2,2) }} + 100% {{ transform: scale(1, 1); opacity: 0;}} + }}`,css.cssRules.length); + break; + }} catch (e) {{ + console.log(e) + }} + }}; + var _d = document.createElement('div'); + _d.style = `{0:s}`; + _d.id = `{1:s}`; + document.body.insertAdjacentElement('afterBegin', _d); + + setTimeout( () => document.getElementById('{1:s}').remove(), {2:d}); + }} + """.format( + style, + secrets.token_hex(8), + int(duration * 1000), + ) + .replace(" ", "") + .replace("\n", "") + ) + + arguments = [cdp.runtime.CallArgument(object_id=self._remote_object.object_id)] + await self._tab.send( + cdp.runtime.call_function_on( + script, + object_id=self._remote_object.object_id, + arguments=arguments, + await_promise=True, + user_gesture=True, + ) + )
+ + +
+[docs] + async def highlight_overlay(self): + """ + highlights the element devtools-style. To remove the highlight, + call the method again. + :return: + :rtype: + """ + + if getattr(self, "_is_highlighted", False): + del self._is_highlighted + await self.tab.send(cdp.overlay.hide_highlight()) + await self.tab.send(cdp.dom.disable()) + await self.tab.send(cdp.overlay.disable()) + return + await self.tab.send(cdp.dom.enable()) + await self.tab.send(cdp.overlay.enable()) + conf = cdp.overlay.HighlightConfig( + show_info=True, show_extension_lines=True, show_styles=True + ) + await self.tab.send( + cdp.overlay.highlight_node( + highlight_config=conf, backend_node_id=self.backend_node_id + ) + ) + setattr(self, "_is_highlighted", 1)
+ + +
+[docs] + async def record_video( + self, + filename: typing.Optional[str] = None, + folder: typing.Optional[str] = None, + duration: typing.Optional[typing.Union[int, float]] = None, + ): + """ + experimental option. + + :param filename: the desired filename + :param folder: the download folder path + :param duration: record for this many seconds and then download + + on html5 video nodes, you can call this method to start recording of the video. + + when any of the follow happens: + + - video ends + - calling videoelement('pause') + - video stops + + the video recorded will be downloaded. + + """ + if self.node_name != "VIDEO": + raise RuntimeError( + "record_video can only be called on html5 video elements" + ) + if not folder: + directory_path = pathlib.Path.cwd() / "downloads" + else: + directory_path = pathlib.Path(folder) + + directory_path.mkdir(exist_ok=True) + await self._tab.send( + cdp.browser.set_download_behavior( + "allow", download_path=str(directory_path) + ) + ) + await self("pause") + await self.apply( + """ + function extractVid(vid) {{ + + var duration = {duration:.1f}; + var stream = vid.captureStream(); + var mr = new MediaRecorder(stream, {{audio:true, video:true}}) + mr.ondataavailable = function(e) {{ + vid['_recording'] = false + var blob = e.data; + f = new File([blob], {{name: {filename}, type:'octet/stream'}}); + var objectUrl = URL.createObjectURL(f); + var link = document.createElement('a'); + link.setAttribute('href', objectUrl) + link.setAttribute('download', {filename}) + link.style.display = 'none' + + document.body.appendChild(link) + + link.click() + + document.body.removeChild(link) + }} + + mr.start() + vid.addEventListener('ended' , (e) => mr.stop()) + vid.addEventListener('pause' , (e) => mr.stop()) + vid.addEventListener('abort', (e) => mr.stop()) + + + if ( duration ) {{ + setTimeout(() => {{ vid.pause(); vid.play() }}, duration); + }} + vid['_recording'] = true + ;}} + + """.format( + filename=f'"{filename}"' if filename else 'document.title + ".mp4"', + duration=int(duration * 1000) if duration else 0, + ) + ) + await self("play") + await self._tab
+ + +
+[docs] + async def is_recording(self): + return await self.apply('(vid) => vid["_recording"]')
+ + + def _make_attrs(self): + sav = None + if self.node.attributes: + for i, a in enumerate(self.node.attributes): + if i == 0 or i % 2 == 0: + if a == "class": + a = "class_" + sav = a + else: + if sav: + self.attrs[sav] = a + + def __eq__(self, other: Element) -> bool: + # if other.__dict__.values() == self.__dict__.values(): + # return True + if other.backend_node_id and self.backend_node_id: + return other.backend_node_id == self.backend_node_id + + return False + + def __repr__(self): + tag_name = self.node.node_name.lower() + content = "" + + # collect all text from this leaf + if self.child_node_count: + if self.child_node_count == 1: + if self.children: + content += str(self.children[0]) + + elif self.child_node_count > 1: + if self.children: + for child in self.children: + content += str(child) + + if self.node.node_type == 3: # we could be a text node ourselves + content += self.node_value + + # return text only, no tag names + # this makes it look most natural, and compatible with other hml libs + + return content + + attrs = " ".join( + [f'{k if k != "class_" else "class"}="{v}"' for k, v in self.attrs.items()] + ) + s = f"<{tag_name} {attrs}>{content}</{tag_name}>" + return s
+ + + +class Position(cdp.dom.Quad): + """helper class for element positioning""" + + def __init__(self, points): + super().__init__(points) + ( + self.left, + self.top, + self.right, + self.top, + self.right, + self.bottom, + self.left, + self.bottom, + ) = points + self.abs_x: float = 0 + self.abs_y: float = 0 + self.x = self.left + self.y = self.top + self.height, self.width = (self.bottom - self.top, self.right - self.left) + self.center = ( + self.left + (self.width / 2), + self.top + (self.height / 2), + ) + + def to_viewport(self, scale=1): + return cdp.page.Viewport( + x=self.x, y=self.y, width=self.width, height=self.height, scale=scale + ) + + def __repr__(self): + return f"<Position(x={self.left}, y={self.top}, width={self.width}, height={self.height})>" + + +async def resolve_node(tab: Tab, node_id: cdp.dom.NodeId): + remote_obj: cdp.runtime.RemoteObject = await tab.send( + cdp.dom.resolve_node(node_id=node_id) + ) + node_id: cdp.dom.NodeId = await tab.send(cdp.dom.request_node(remote_obj.object_id)) + node: cdp.dom.Node = await tab.send(cdp.dom.describe_node(node_id)) + return node +
+
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/nodriver/core/tab.html b/docs/_build/html/_modules/nodriver/core/tab.html
new file mode 100644
index 0000000..8bd805c
--- /dev/null
+++ b/docs/_build/html/_modules/nodriver/core/tab.html
@@ -0,0 +1,1754 @@
+ nodriver.core.tab - nodriver documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+
+ +
+ +
+
+

Source code for nodriver.core.tab

+from __future__ import annotations
+import asyncio
+import json
+import logging
+import pathlib
+import typing
+import warnings
+from typing import List, Union, Optional, Tuple
+
+import nodriver.core.browser
+from . import element
+from . import util
+from .config import PathLike
+from .connection import Connection, ProtocolException
+from .. import cdp
+
+logger = logging.getLogger(__name__)
+
+
+
+[docs]
+class Tab(Connection):
+    """
+    :ref:`tab` is the controlling mechanism/connection to a 'target',
+    for most of us 'target' can be read as 'tab'. however it could also
+    be an iframe, serviceworker or background script for example,
+    although there isn't much to control for those.
+
+    if you open a new window by using :py:meth:`browser.get(..., new_window=True)`
+    your url will open in a new window. this window is a 'tab'.
+    When you browse to another page, the tab will be the same (it is a browser view).
+
+    So it's important to keep some reference to tab objects, in case you're
+    done interacting with elements and want to operate on the page level again.
+
+    Custom CDP commands
+    ---------------------------
+    Tab objects provide many useful and often-used methods. It is also
+    possible to utilize the included cdp classes to do something totally custom.
+
+    the cdp package is a set of so-called "domains" with each having methods, events and types.
+    to send a cdp method, for example :py:obj:`cdp.page.navigate`, you'll have to check
+    whether the method accepts any parameters and whether they are required or not.
+
+    you can use
+
+    ```python
+    await tab.send(cdp.page.navigate(url='https://yoururlhere'))
+    ```
+
+    so tab.send() accepts a generator object, which is created by calling a cdp method.
+    this way you can build very detailed and customized commands.
+    (note: finding the correct command combos can be a time-consuming task, luckily i added a whole bunch
+    of useful methods, preferably having the same apis or lookalikes, as in selenium)
+
+
+    some useful, often needed and simply required methods
+    ===================================================================
+
+
+    :py:meth:`~find`  |  find(text)
+    ----------------------------------------
+    finds and returns a single element by text match. by default returns the first element found.
+    much more powerful is the best_match flag, although also much more expensive.
+    when no match is found, it will retry for <timeout> seconds (default: 10), so
+    this is also suitable to use as a wait condition.
+
+
+    :py:meth:`~find` |  find(text, best_match=True) or find(text, True)
+    ---------------------------------------------------------------------------------
+    Much more powerful (and expensive!!) than the above is the use of the `find(text, best_match=True)` flag.
+    It will still return 1 element, but when multiple matches are found, it picks the one having the
+    most similar text length.
+    How would that help?
+    For example, you search for "login", you'd probably want the "login" button element,
+    and not thousands of scripts, meta tags and headings which happen to contain a string of "login".
+
+    when no match is found, it will retry for <timeout> seconds (default: 10), so
+    this is also suitable to use as a wait condition.
+
+
+    :py:meth:`~select` | select(selector)
+    ----------------------------------------
+    finds and returns a single element by css selector match.
+    when no match is found, it will retry for <timeout> seconds (default: 10), so
+    this is also suitable to use as a wait condition.
+
+
+    :py:meth:`~select_all` | select_all(selector)
+    ------------------------------------------------
+    finds and returns all elements by css selector match.
+    when no match is found, it will retry for <timeout> seconds (default: 10), so
+    this is also suitable to use as a wait condition.
+
+
+    await :py:obj:`Tab`
+    ---------------------------
+    calling `await tab` will do a lot of stuff under the hood, and ensures all references
+    are up to date. it also allows the script to "breathe", as it is oftentimes faster than your browser or
+    webpage. So whenever you get stuck and things crash or an element could not be found, you should probably let
+    it "breathe" by calling `await page` and/or `await page.sleep()`
+
+    also, it ensures :py:obj:`~url` will be updated to the most recent one, which is quite important in some
+    other methods.
+
+    Using other and custom CDP commands
+    ======================================================
+    using the included cdp module, you can easily craft commands, which will always return a generator object.
+    this generator object can be easily sent to the :py:meth:`~send` method.
+
+    :py:meth:`~send`
+    ---------------------------
+    this is probably THE most important method, although you won't ever call it, unless you want to
+    go really custom. the send method accepts a :py:obj:`cdp` command. Each of which can be found in the
+    cdp section.
+
+    when you import * from this package, cdp will be in your namespace, and contains all domains/actions/events
+    you can act upon.
+    """
+
+    browser: nodriver.core.browser.Browser
+    _download_behavior: List[str] = None
+
+    def __init__(
+        self,
+        websocket_url: str,
+        target: cdp.target.TargetInfo,
+        browser: Optional["nodriver.Browser"] = None,
+        **kwargs,
+    ):
+        super().__init__(websocket_url, target, **kwargs)
+        self.browser = browser
+        self._dom = None
+        self._window_id = None
+
+    @property
+    def inspector_url(self):
+        """
+        get the inspector url. this url can be used in another browser to show you the devtools interface for
+        the current tab. useful for debugging (and headless)
+        :return:
+        :rtype:
+        """
+        return f"http://{self.browser.config.host}:{self.browser.config.port}/devtools/inspector.html?ws={self.websocket_url[5:]}"
+
+[docs] + def inspector_open(self): + import webbrowser + + webbrowser.open(self.inspector_url, new=2)
+ + +
+[docs] + async def open_external_inspector(self): + """ + opens the system's browser containing the devtools inspector page + for this tab. could be handy, especially to debug in headless mode. + """ + import webbrowser + + webbrowser.open(self.inspector_url)
+ + +
+[docs] + async def find( + self, + text: str, + best_match: bool = True, + return_enclosing_element=True, + timeout: Union[int, float] = 10, + ): + """ + find single element by text + can also be used to wait for such element to appear. + + :param text: text to search for. note: script contents are also considered text + :type text: str + :param best_match: :param best_match: when True (default), it will return the element which has the most + comparable string length. this could help tremendously, when for example + you search for "login", you'd probably want the login button element, + and not thousands of scripts,meta,headings containing a string of "login". + When False, it will return naively just the first match (but is way faster). + :type best_match: bool + :param return_enclosing_element: + since we deal with nodes instead of elements, the find function most often returns + so called text nodes, which is actually a element of plain text, which is + the somehow imaginary "child" of a "span", "p", "script" or any other elements which have text between their opening + and closing tags. + most often when we search by text, we actually aim for the element containing the text instead of + a lousy plain text node, so by default the containing element is returned. + + however, there are (why not) exceptions, for example elements that use the "placeholder=" property. + this text is rendered, but is not a pure text node. in that case you can set this flag to False. + since in this case we are probably interested in just that element, and not it's parent. + + + # todo, automatically determine node type + # ignore the return_enclosing_element flag if the found node is NOT a text node but a + # regular element (one having a tag) in which case that is exactly what we need. + :type return_enclosing_element: bool + :param timeout: raise timeout exception when after this many seconds nothing is found. + :type timeout: float,int + """ + loop = asyncio.get_running_loop() + start_time = loop.time() + + text = text.strip() + + item = await self.find_element_by_text( + text, best_match, return_enclosing_element + ) + while not item: + await self + item = await self.find_element_by_text( + text, best_match, return_enclosing_element + ) + if loop.time() - start_time > timeout: + raise asyncio.TimeoutError( + "time ran out while waiting for text: %s" % text + ) + await self.sleep(0.5) + return item
+ + +
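+
+# Usage sketch for find() (illustration only, not part of the library source).
+# Assumes `tab` is a nodriver Tab pointing at a page that contains the text "login".
+async def find_example(tab):
+    # naive first match (fast)
+    first = await tab.find("login", best_match=False)
+    # best match by text length (slower, usually what you want for buttons/links)
+    best = await tab.find("login", best_match=True, timeout=5)
+    await best.click()
+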
+[docs] + async def select( + self, + selector: str, + timeout: Union[int, float] = 10, + ) -> nodriver.Element: + """ + find single element by css selector. + can also be used to wait for such element to appear. + + :param selector: css selector, eg a[href], button[class*=close], a > img[src] + :type selector: str + + :param timeout: raise timeout exception when after this many seconds nothing is found. + :type timeout: float,int + + """ + loop = asyncio.get_running_loop() + start_time = loop.time() + + selector = selector.strip() + item = await self.query_selector(selector) + + while not item: + await self + item = await self.query_selector(selector) + if loop.time() - start_time > timeout: + raise asyncio.TimeoutError( + "time ran out while waiting for %s" % selector + ) + await self.sleep(0.5) + return item
+ + +
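+
+# Usage sketch for select() (illustration only, not part of the library source).
+# Assumes `tab` is a nodriver Tab; select() keeps retrying until the element
+# appears or the timeout is hit, so it doubles as a wait condition.
+async def select_example(tab):
+    submit = await tab.select("button[type=submit]", timeout=15)
+    await submit.click()
+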
+[docs]
+    async def find_all(
+        self,
+        text: str,
+        timeout: Union[int, float] = 10,
+    ) -> List[nodriver.Element]:
+        """
+        find multiple elements by text.
+        can also be used to wait for such elements to appear.
+
+        :param text: text to search for. note: script contents are also considered text
+        :type text: str
+
+        :param timeout: raise timeout exception when after this many seconds nothing is found.
+        :type timeout: float,int
+        """
+        loop = asyncio.get_running_loop()
+        now = loop.time()
+
+        text = text.strip()
+        items = await self.find_elements_by_text(text)
+
+        while not items:
+            await self
+            # re-run the search and update items, otherwise this loop would never see new results
+            items = await self.find_elements_by_text(text)
+            if loop.time() - now > timeout:
+                raise asyncio.TimeoutError(
+                    "time ran out while waiting for text: %s" % text
+                )
+            await self.sleep(0.5)
+        return items
+ + +
+[docs] + async def select_all( + self, selector: str, timeout: Union[int, float] = 10, include_frames=False + ) -> List[nodriver.Element]: + """ + find multiple elements by css selector. + can also be used to wait for such element to appear. + + + :param selector: css selector, eg a[href], button[class*=close], a > img[src] + :type selector: str + :param timeout: raise timeout exception when after this many seconds nothing is found. + :type timeout: float,int + :param include_frames: whether to include results in iframes. + :type include_frames: bool + """ + + loop = asyncio.get_running_loop() + now = loop.time() + selector = selector.strip() + items = [] + if include_frames: + frames = await self.query_selector_all("iframe") + # unfortunately, asyncio.gather here is not an option + for fr in frames: + items.extend(await fr.query_selector_all(selector)) + + items.extend(await self.query_selector_all(selector)) + while not items: + await self + items = await self.query_selector_all(selector) + if loop.time() - now > timeout: + raise asyncio.TimeoutError( + "time ran out while waiting for %s" % selector + ) + await self.sleep(0.5) + return items
+ + +
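+
+# Usage sketch for select_all() (illustration only, not part of the library source).
+# Assumes `tab` is a nodriver Tab; include_frames=True also searches iframe content.
+async def select_all_example(tab):
+    links = await tab.select_all("a[href]", include_frames=True)
+    for link in links:
+        print(link.attrs.get("href"))
+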
+[docs] + async def get( + self, url="chrome://welcome", new_tab: bool = False, new_window: bool = False + ): + """top level get. utilizes the first tab to retrieve given url. + + convenience function known from selenium. + this function handles waits/sleeps and detects when DOM events fired, so it's the safest + way of navigating. + + :param url: the url to navigate to + :param new_tab: open new tab + :param new_window: open new window + :return: Page + """ + if not self.browser: + raise AttributeError( + "this page/tab has no browser attribute, so you can't use get()" + ) + if new_window and not new_tab: + new_tab = True + + if new_tab: + return await self.browser.get(url, new_tab, new_window) + else: + frame_id, loader_id, *_ = await self.send(cdp.page.navigate(url)) + await self + return self
+ + +
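+
+# Usage sketch for get() (illustration only, not part of the library source).
+# Assumes `browser` is a started nodriver Browser, e.g. `browser = await nodriver.start()`.
+async def get_example(browser):
+    tab = await browser.get("https://example.com")
+    # open a second url in a new tab of the same browser
+    other = await tab.get("https://example.org", new_tab=True)
+    print(tab.target.url, other.target.url)
+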
+[docs] + async def query_selector_all( + self, + selector: str, + _node: Optional[Union[cdp.dom.Node, "element.Element"]] = None, + ): + """ + equivalent of javascripts document.querySelectorAll. + this is considered one of the main methods to use in this package. + + it returns all matching :py:obj:`nodriver.Element` objects. + + :param selector: css selector. (first time? => https://www.w3schools.com/cssref/css_selectors.php ) + :type selector: str + :param _node: internal use + :type _node: + :return: + :rtype: + """ + + if not _node: + doc: cdp.dom.Node = await self.send(cdp.dom.get_document(-1, True)) + else: + doc = _node + if _node.node_name == "IFRAME": + doc = _node.content_document + node_ids = [] + + try: + node_ids = await self.send( + cdp.dom.query_selector_all(doc.node_id, selector) + ) + + except ProtocolException as e: + if _node is not None: + if "could not find node" in e.message.lower(): + if getattr(_node, "__last", None): + del _node.__last + return [] + # if supplied node is not found, the dom has changed since acquiring the element + # therefore we need to update our passed node and try again + await _node.update() + _node.__last = ( + True # make sure this isn't turned into infinite loop + ) + return await self.query_selector_all(selector, _node) + else: + await self.send(cdp.dom.disable()) + raise + if not node_ids: + return [] + items = [] + + for nid in node_ids: + node = util.filter_recurse(doc, lambda n: n.node_id == nid) + # we pass along the retrieved document tree, + # to improve performance + if not node: + continue + elem = element.create(node, self, doc) + items.append(elem) + + return items
+ + +
+[docs] + async def query_selector( + self, + selector: str, + _node: Optional[Union[cdp.dom.Node, element.Element]] = None, + ): + """ + find single element based on css selector string + + :param selector: css selector(s) + :type selector: str + :return: + :rtype: + """ + selector = selector.strip() + + if not _node: + doc: cdp.dom.Node = await self.send(cdp.dom.get_document(-1, True)) + else: + doc = _node + if _node.node_name == "IFRAME": + doc = _node.content_document + node_id = None + + try: + node_id = await self.send(cdp.dom.query_selector(doc.node_id, selector)) + + except ProtocolException as e: + if _node is not None: + if "could not find node" in e.message.lower(): + if getattr(_node, "__last", None): + del _node.__last + return [] + # if supplied node is not found, the dom has changed since acquiring the element + # therefore we need to update our passed node and try again + await _node.update() + _node.__last = ( + True # make sure this isn't turned into infinite loop + ) + return await self.query_selector(selector, _node) + else: + await self.send(cdp.dom.disable()) + raise + if not node_id: + return + node = util.filter_recurse(doc, lambda n: n.node_id == node_id) + if not node: + return + return element.create(node, self, doc)
+ + +
+[docs] + async def find_elements_by_text( + self, + text: str, + tag_hint: Optional[str] = None, + ) -> List[element.Element]: + """ + returns element which match the given text. + please note: this may (or will) also return any other element (like inline scripts), + which happen to contain that text. + + :param text: + :type text: + :param tag_hint: when provided, narrows down search to only elements which match given tag eg: a, div, script, span + :type tag_hint: str + :return: + :rtype: + """ + text = text.strip() + doc = await self.send(cdp.dom.get_document(-1, True)) + search_id, nresult = await self.send(cdp.dom.perform_search(text, True)) + if nresult: + node_ids = await self.send( + cdp.dom.get_search_results(search_id, 0, nresult) + ) + else: + node_ids = [] + + await self.send(cdp.dom.discard_search_results(search_id)) + + items = [] + for nid in node_ids: + node = util.filter_recurse(doc, lambda n: n.node_id == nid) + if not node: + node = await self.send(cdp.dom.resolve_node(node_id=nid)) + if not node: + continue + # remote_object = await self.send(cdp.dom.resolve_node(backend_node_id=node.backend_node_id)) + # node_id = await self.send(cdp.dom.request_node(object_id=remote_object.object_id)) + try: + elem = element.create(node, self, doc) + except: # noqa + continue + if elem.node_type == 3: + # if found element is a text node (which is plain text, and useless for our purpose), + # we return the parent element of the node (which is often a tag which can have text between their + # opening and closing tags (that is most tags, except for example "img" and "video", "br") + + if not elem.parent: + # check if parent actually has a parent and update it to be absolutely sure + await elem.update() + + items.append( + elem.parent or elem + ) # when it really has no parent, use the text node itself + continue + else: + # just add the element itself + items.append(elem) + + # since we already fetched the entire doc, including shadow and frames + # let's also search through the iframes + iframes = util.filter_recurse_all(doc, lambda node: node.node_name == "IFRAME") + if iframes: + iframes_elems = [ + element.create(iframe, self, iframe.content_document) + for iframe in iframes + ] + for iframe_elem in iframes_elems: + if iframe_elem.content_document: + iframe_text_nodes = util.filter_recurse_all( + iframe_elem, + lambda node: node.node_type == 3 # noqa + and text.lower() in node.node_value.lower(), + ) + if iframe_text_nodes: + iframe_text_elems = [ + element.create(text_node, self, iframe_elem.tree) + for text_node in iframe_text_nodes + ] + items.extend( + text_node.parent for text_node in iframe_text_elems + ) + await self.send(cdp.dom.disable()) + return items or []
+ + +
+[docs] + async def find_element_by_text( + self, + text: str, + best_match: Optional[bool] = False, + return_enclosing_element: Optional[bool] = True, + ) -> Union[element.Element, None]: + """ + finds and returns the first element containing <text>, or best match + + :param text: + :type text: + :param best_match: when True, which is MUCH more expensive (thus much slower), + will find the closest match based on length. + this could help tremendously, when for example you search for "login", you'd probably want the login button element, + and not thousands of scripts,meta,headings containing a string of "login". + + :type best_match: bool + :param return_enclosing_element: + :type return_enclosing_element: + :return: + :rtype: + """ + doc = await self.send(cdp.dom.get_document(-1, True)) + text = text.strip() + search_id, nresult = await self.send(cdp.dom.perform_search(text, True)) + + node_ids = await self.send(cdp.dom.get_search_results(search_id, 0, nresult)) + await self.send(cdp.dom.discard_search_results(search_id)) + + if not node_ids: + node_ids = [] + items = [] + for nid in node_ids: + node = util.filter_recurse(doc, lambda n: n.node_id == nid) + try: + elem = element.create(node, self, doc) + except: # noqa + continue + if elem.node_type == 3: + # if found element is a text node (which is plain text, and useless for our purpose), + # we return the parent element of the node (which is often a tag which can have text between their + # opening and closing tags (that is most tags, except for example "img" and "video", "br") + + if not elem.parent: + # check if parent actually has a parent and update it to be absolutely sure + await elem.update() + + items.append( + elem.parent or elem + ) # when it really has no parent, use the text node itself + continue + else: + # just add the element itself + items.append(elem) + + # since we already fetched the entire doc, including shadow and frames + # let's also search through the iframes + iframes = util.filter_recurse_all(doc, lambda node: node.node_name == "IFRAME") + if iframes: + iframes_elems = [ + element.create(iframe, self, iframe.content_document) + for iframe in iframes + ] + for iframe_elem in iframes_elems: + iframe_text_nodes = util.filter_recurse_all( + iframe_elem, + lambda node: node.node_type == 3 # noqa + and text.lower() in node.node_value.lower(), + ) + if iframe_text_nodes: + iframe_text_elems = [ + element.create(text_node, self, iframe_elem.tree) + for text_node in iframe_text_nodes + ] + items.extend(text_node.parent for text_node in iframe_text_elems) + try: + if not items: + return + if best_match: + closest_by_length = min( + items, key=lambda el: abs(len(text) - len(el.text_all)) + ) + elem = closest_by_length or items[0] + + return elem + else: + # naively just return the first result + for elem in items: + if elem: + return elem + finally: + await self.send(cdp.dom.disable())
+ + +
+[docs] + async def back(self): + """ + history back + """ + await self.send(cdp.runtime.evaluate("window.history.back()"))
+ + +
+[docs] + async def forward(self): + """ + history forward + """ + await self.send(cdp.runtime.evaluate("window.history.forward()"))
+ + +
+[docs] + async def reload( + self, + ignore_cache: Optional[bool] = True, + script_to_evaluate_on_load: Optional[str] = None, + ): + """ + Reloads the page + + :param ignore_cache: when set to True (default), it ignores cache, and re-downloads the items + :type ignore_cache: + :param script_to_evaluate_on_load: script to run on load. I actually haven't experimented with this one, so no guarantees. + :type script_to_evaluate_on_load: + :return: + :rtype: + """ + await self.send( + cdp.page.reload( + ignore_cache=ignore_cache, + script_to_evaluate_on_load=script_to_evaluate_on_load, + ), + )
+ + +
+[docs] + async def evaluate( + self, expression: str, await_promise=False, return_by_value=True + ): + remote_object, errors = await self.send( + cdp.runtime.evaluate( + expression=expression, + user_gesture=True, + await_promise=await_promise, + return_by_value=return_by_value, + allow_unsafe_eval_blocked_by_csp=True, + ) + ) + if errors: + raise ProtocolException(errors) + + if remote_object: + if return_by_value: + if remote_object.value: + return remote_object.value + + else: + return remote_object, errors
+ + +
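+
+# Usage sketch for evaluate() (illustration only, not part of the library source).
+# Assumes `tab` is a nodriver Tab with a loaded page.
+async def evaluate_example(tab):
+    title = await tab.evaluate("document.title")
+    ready_state = await tab.evaluate("document.readyState")
+    print(title, ready_state)
+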
+[docs] + async def js_dumps( + self, obj_name: str, return_by_value: Optional[bool] = True + ) -> typing.Union[ + typing.Dict, + typing.Tuple[cdp.runtime.RemoteObject, cdp.runtime.ExceptionDetails], + ]: + """ + dump given js object with its properties and values as a dict + + note: complex objects might not be serializable, therefore this method is not a "source of thruth" + + :param obj_name: the js object to dump + :type obj_name: str + + :param return_by_value: if you want an tuple of cdp objects (returnvalue, errors), set this to False + :type return_by_value: bool + + example + ------ + + x = await self.js_dumps('window') + print(x) + '...{ + 'pageYOffset': 0, + 'visualViewport': {}, + 'screenX': 10, + 'screenY': 10, + 'outerWidth': 1050, + 'outerHeight': 832, + 'devicePixelRatio': 1, + 'screenLeft': 10, + 'screenTop': 10, + 'styleMedia': {}, + 'onsearch': None, + 'isSecureContext': True, + 'trustedTypes': {}, + 'performance': {'timeOrigin': 1707823094767.9, + 'timing': {'connectStart': 0, + 'navigationStart': 1707823094768, + ]... + ' + """ + js_code_a = ( + """ + function ___dump(obj, _d = 0) { + let _typesA = ['object', 'function']; + let _typesB = ['number', 'string', 'boolean']; + if (_d == 2) { + console.log('maxdepth reached for ', obj); + return + } + let tmp = {} + for (let k in obj) { + if (obj[k] == window) continue; + let v; + try { + if (obj[k] === null || obj[k] === undefined || obj[k] === NaN) { + console.log('obj[k] is null or undefined or Nan', k, '=>', obj[k]) + tmp[k] = obj[k]; + continue + } + } catch (e) { + tmp[k] = null; + continue + } + + + if (_typesB.includes(typeof obj[k])) { + tmp[k] = obj[k] + continue + } + + try { + if (typeof obj[k] === 'function') { + tmp[k] = obj[k].toString() + continue + } + + + if (typeof obj[k] === 'object') { + tmp[k] = ___dump(obj[k], _d + 1); + continue + } + + + } catch (e) {} + + try { + tmp[k] = JSON.stringify(obj[k]) + continue + } catch (e) { + + } + try { + tmp[k] = obj[k].toString(); + continue + } catch (e) {} + } + return tmp + } + + function ___dumpY(obj) { + var objKeys = (obj) => { + var [target, result] = [obj, []]; + while (target !== null) { + result = result.concat(Object.getOwnPropertyNames(target)); + target = Object.getPrototypeOf(target); + } + return result; + } + return Object.fromEntries( + objKeys(obj).map(_ => [_, ___dump(obj[_])])) + + } + ___dumpY( %s ) + """ + % obj_name + ) + js_code_b = ( + """ + ((obj, visited = new WeakSet()) => { + if (visited.has(obj)) { + return {} + } + visited.add(obj) + var result = {}, _tmp; + for (var i in obj) { + try { + if (i === 'enabledPlugin' || typeof obj[i] === 'function') { + continue; + } else if (typeof obj[i] === 'object') { + _tmp = recurse(obj[i], visited); + if (Object.keys(_tmp).length) { + result[i] = _tmp; + } + } else { + result[i] = obj[i]; + } + } catch (error) { + // console.error('Error:', error); + } + } + return result; + })(%s) + """ + % obj_name + ) + + # we're purposely not calling self.evaluate here to prevent infinite loop on certain expressions + + remote_object, exception_details = await self.send( + cdp.runtime.evaluate( + js_code_a, + await_promise=True, + return_by_value=return_by_value, + allow_unsafe_eval_blocked_by_csp=True, + ) + ) + if exception_details: + + # try second variant + + remote_object, exception_details = await self.send( + cdp.runtime.evaluate( + js_code_b, + await_promise=True, + return_by_value=return_by_value, + allow_unsafe_eval_blocked_by_csp=True, + ) + ) + + if exception_details: + raise 
ProtocolException(exception_details) + if return_by_value: + if remote_object.value: + return remote_object.value + else: + return remote_object, exception_details
+ + +
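+
+# Usage sketch for js_dumps() (illustration only, not part of the library source).
+# Assumes `tab` is a nodriver Tab; as the docstring notes, complex objects may not
+# serialize completely, so treat the result as a best-effort snapshot.
+async def js_dumps_example(tab):
+    nav = await tab.js_dumps("navigator")
+    print(nav.get("userAgent"), nav.get("language"))
+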
+[docs] + async def close(self): + """ + close the current target (ie: tab,window,page) + :return: + :rtype: + """ + if self.target and self.target.target_id: + await self.send(cdp.target.close_target(target_id=self.target.target_id))
+ + +
+[docs] + async def get_window(self) -> Tuple[cdp.browser.WindowID, cdp.browser.Bounds]: + """ + get the window Bounds + :return: + :rtype: + """ + window_id, bounds = await self.send( + cdp.browser.get_window_for_target(self.target_id) + ) + return window_id, bounds
+ + +
+[docs] + async def get_content(self): + """ + gets the current page source content (html) + :return: + :rtype: + """ + doc: cdp.dom.Node = await self.send(cdp.dom.get_document(-1, True)) + return await self.send( + cdp.dom.get_outer_html(backend_node_id=doc.backend_node_id) + )
+ + +
+[docs] + async def maximize(self): + """ + maximize page/tab/window + """ + return await self.set_window_state(state="maximize")
+ + +
+[docs] + async def minimize(self): + """ + minimize page/tab/window + """ + return await self.set_window_state(state="minimize")
+ + +
+[docs]
+    async def fullscreen(self):
+        """
+        fullscreen page/tab/window
+        """
+        return await self.set_window_state(state="fullscreen")
+ + +
+[docs] + async def medimize(self): + return await self.set_window_state(state="normal")
+ + +
+[docs] + async def set_window_size(self, left=0, top=0, width=1280, height=1024): + """ + set window size and position + + :param left: pixels from the left of the screen to the window top-left corner + :type left: + :param top: pixels from the top of the screen to the window top-left corner + :type top: + :param width: width of the window in pixels + :type width: + :param height: height of the window in pixels + :type height: + :return: + :rtype: + """ + return await self.set_window_state(left, top, width, height)
+ + +
+[docs]
+    async def activate(self):
+        """
+        activate this target (ie: tab, window, page)
+        """
+        await self.send(cdp.target.activate_target(self.target.target_id))
+ + +
+[docs] + async def bring_to_front(self): + """ + alias to self.activate + """ + await self.activate()
+ + +
+[docs] + async def set_window_state( + self, left=0, top=0, width=1280, height=720, state="normal" + ): + """ + sets the window size or state. + + for state you can provide the full name like minimized, maximized, normal, fullscreen, or + something which leads to either of those, like min, mini, mi, max, ma, maxi, full, fu, no, nor + in case state is set other than "normal", the left, top, width, and height are ignored. + + :param left: + desired offset from left, in pixels + :type left: int + + :param top: + desired offset from the top, in pixels + :type top: int + + :param width: + desired width in pixels + :type width: int + + :param height: + desired height in pixels + :type height: int + + :param state: + can be one of the following strings: + - normal + - fullscreen + - maximized + - minimized + + :type state: str + + """ + available_states = ["minimized", "maximized", "fullscreen", "normal"] + window_id: cdp.browser.WindowID + bounds: cdp.browser.Bounds + (window_id, bounds) = await self.get_window() + + for state_name in available_states: + if all(x in state_name for x in state.lower()): + break + else: + raise NameError( + "could not determine any of %s from input '%s'" + % (",".join(available_states), state) + ) + window_state = getattr( + cdp.browser.WindowState, state_name.upper(), cdp.browser.WindowState.NORMAL + ) + if window_state == cdp.browser.WindowState.NORMAL: + bounds = cdp.browser.Bounds(left, top, width, height, window_state) + else: + # min, max, full can only be used when current state == NORMAL + # therefore we first switch to NORMAL + await self.set_window_state(state="normal") + bounds = cdp.browser.Bounds(window_state=window_state) + + await self.send(cdp.browser.set_window_bounds(window_id, bounds=bounds))
+ + +
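+
+# Usage sketch for the window helpers (illustration only, not part of the library
+# source). Assumes `tab` is a nodriver Tab attached to a visible (non-headless) window.
+async def window_example(tab):
+    await tab.set_window_size(left=0, top=0, width=1280, height=720)
+    await tab.maximize()
+    # shorthand state names also work, e.g. "min", "max", "full", "normal"
+    await tab.set_window_state(state="normal")
+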
+[docs] + async def scroll_down(self, amount=25): + """ + scrolls down maybe + + :param amount: number in percentage. 25 is a quarter of page, 50 half, and 1000 is 10x the page + :type amount: int + :return: + :rtype: + """ + window_id: cdp.browser.WindowID + bounds: cdp.browser.Bounds + (window_id, bounds) = await self.get_window() + + await self.send( + cdp.input_.synthesize_scroll_gesture( + x=0, + y=0, + y_distance=-(bounds.height * (amount / 100)), + y_overscroll=0, + x_overscroll=0, + prevent_fling=True, + repeat_delay_ms=0, + speed=7777, + ) + )
+ + +
+[docs] + async def scroll_up(self, amount=25): + """ + scrolls up maybe + + :param amount: number in percentage. 25 is a quarter of page, 50 half, and 1000 is 10x the page + :type amount: int + + :return: + :rtype: + """ + window_id: cdp.browser.WindowID + bounds: cdp.browser.Bounds + (window_id, bounds) = await self.get_window() + + await self.send( + cdp.input_.synthesize_scroll_gesture( + x=0, + y=0, + y_distance=(bounds.height * (amount / 100)), + x_overscroll=0, + prevent_fling=True, + repeat_delay_ms=0, + speed=7777, + ) + )
+ + +
+[docs] + async def wait_for( + self, + selector: Optional[str] = "", + text: Optional[str] = "", + timeout: Optional[Union[int, float]] = 10, + ) -> element.Element: + """ + variant on query_selector_all and find_elements_by_text + this variant takes either selector or text, and will block until + the requested element(s) are found. + + it will block for a maximum of <timeout> seconds, after which + an TimeoutError will be raised + + :param selector: css selector + :type selector: + :param text: text + :type text: + :param timeout: + :type timeout: + :return: + :rtype: Element + :raises: asyncio.TimeoutError + """ + loop = asyncio.get_running_loop() + now = loop.time() + if selector: + item = await self.query_selector(selector) + while not item: + item = await self.query_selector(selector) + if loop.time() - now > timeout: + raise asyncio.TimeoutError( + "time ran out while waiting for %s" % selector + ) + await self.sleep(0.5) + # await self.sleep(0.5) + return item + if text: + item = await self.find_element_by_text(text) + while not item: + item = await self.find_element_by_text(text) + if loop.time() - now > timeout: + raise asyncio.TimeoutError( + "time ran out while waiting for text: %s" % text + ) + await self.sleep(0.5) + return item
+ + +
+[docs] + async def download_file(self, url: str, filename: Optional[PathLike] = None): + """ + downloads file by given url. + + :param url: url of the file + :param filename: the name for the file. if not specified the name is composed from the url file name + """ + if not self._download_behavior: + directory_path = pathlib.Path.cwd() / "downloads" + directory_path.mkdir(exist_ok=True) + await self.set_download_path(directory_path) + + warnings.warn( + f"no download path set, so creating and using a default of" + f"{directory_path}" + ) + if not filename: + filename = url.rsplit("/")[-1] + filename = filename.split("?")[0] + + code = """ + (elem) => { + async function _downloadFile( + imageSrc, + nameOfDownload, + ) { + const response = await fetch(imageSrc); + const blobImage = await response.blob(); + const href = URL.createObjectURL(blobImage); + + const anchorElement = document.createElement('a'); + anchorElement.href = href; + anchorElement.download = nameOfDownload; + + document.body.appendChild(anchorElement); + anchorElement.click(); + + setTimeout(() => { + document.body.removeChild(anchorElement); + window.URL.revokeObjectURL(href); + }, 500); + } + _downloadFile('%s', '%s') + } + """ % ( + url, + filename, + ) + + body = (await self.query_selector_all("body"))[0] + await body.update() + await self.send( + cdp.runtime.call_function_on( + code, + object_id=body.object_id, + arguments=[cdp.runtime.CallArgument(object_id=body.object_id)], + ) + )
+ + +
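+
+# Usage sketch for download_file() (illustration only, not part of the library source).
+# Assumes `tab` is a nodriver Tab; the url and folder below are made-up examples.
+import pathlib
+
+async def download_example(tab):
+    downloads = pathlib.Path("downloads")
+    downloads.mkdir(exist_ok=True)
+    await tab.set_download_path(downloads)
+    await tab.download_file("https://example.com/report.pdf", filename="report.pdf")
+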
+[docs] + async def save_screenshot( + self, + filename: Optional[PathLike] = "auto", + format: Optional[str] = "jpeg", + full_page: Optional[bool] = False, + ) -> str: + """ + Saves a screenshot of the page. + This is not the same as :py:obj:`Element.save_screenshot`, which saves a screenshot of a single element only + + :param filename: uses this as the save path + :type filename: PathLike + :param format: jpeg or png (defaults to jpeg) + :type format: str + :param full_page: when False (default) it captures the current viewport. when True, it captures the entire page + :type full_page: bool + :return: the path/filename of saved screenshot + :rtype: str + """ + # noqa + import urllib.parse + import datetime + + await self.sleep() # update the target's url + path = None + + if format.lower() in ["jpg", "jpeg"]: + ext = ".jpg" + format = "jpeg" + + elif format.lower() in ["png"]: + ext = ".png" + format = "png" + + if not filename or filename == "auto": + parsed = urllib.parse.urlparse(self.target.url) + parts = parsed.path.split("/") + last_part = parts[-1] + last_part = last_part.rsplit("?", 1)[0] + dt_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + candidate = f"{parsed.hostname}__{last_part}_{dt_str}" + path = pathlib.Path(candidate + ext) # noqa + else: + path = pathlib.Path(filename) + path.parent.mkdir(parents=True, exist_ok=True) + data = await self.send( + cdp.page.capture_screenshot( + format_=format, capture_beyond_viewport=full_page + ) + ) + if not data: + raise ProtocolException( + "could not take screenshot. most possible cause is the page has not finished loading yet." + ) + import base64 + + data_bytes = base64.b64decode(data) + if not path: + raise RuntimeError("invalid filename or path: '%s'" % filename) + path.write_bytes(data_bytes) + return str(path)
+ + +
+[docs] + async def set_download_path(self, path: PathLike): + """ + sets the download path and allows downloads + this is required for any download function to work (well not entirely, since when unset we set a default folder) + + :param path: + :type path: + :return: + :rtype: + """ + await self.send( + cdp.browser.set_download_behavior( + behavior="allow", download_path=str(path.resolve()) + ) + ) + self._download_behavior = ["allow", str(path.resolve())]
+ + +
+[docs] + async def get_all_linked_sources(self) -> List["nodriver.Element"]: + """ + get all elements of tag: link, a, img, scripts meta, video, audio + + :return: + """ + all_assets = await self.query_selector_all(selector="a,link,img,script,meta") + return [element.create(asset, self) for asset in all_assets]
+ + +
+[docs] + async def get_all_urls(self, absolute=True) -> List[str]: + """ + convenience function, which returns all links (a,link,img,script,meta) + + :param absolute: try to build all the links in absolute form instead of "as is", often relative + :return: list of urls + """ + + import urllib.parse + + res = [] + all_assets = await self.query_selector_all(selector="a,link,img,script,meta") + for asset in all_assets: + if not absolute: + res.append(asset.src or asset.href) + else: + for k, v in asset.attrs.items(): + if k in ("src", "href"): + if "#" in v: + continue + if not any([_ in v for _ in ("http", "//", "/")]): + continue + abs_url = urllib.parse.urljoin( + "/".join(self.url.rsplit("/")[:3]), v + ) + if not abs_url.startswith(("http", "//", "ws")): + continue + res.append(abs_url) + return res
+ + +
+[docs] + async def verify_cf(self): + """an attempt..""" + checkbox = None + checkbox_sibling = await self.wait_for(text="verify you are human") + if checkbox_sibling: + parent = checkbox_sibling.parent + while parent: + checkbox = await parent.query_selector("input[type=checkbox]") + if checkbox: + break + parent = parent.parent + await checkbox.mouse_move() + await checkbox.mouse_click()
+ + + def __call__( + self, + text: Optional[str] = "", + selector: Optional[str] = "", + timeout: Optional[Union[int, float]] = 10, + ): + """ + alias to query_selector_all or find_elements_by_text, depending + on whether text= is set or selector= is set + + :param selector: css selector string + :type selector: str + :return: + :rtype: + """ + return self.wait_for(text, selector, timeout) + + def __eq__(self, other: Tab): + try: + return other.target == self.target + except (AttributeError, TypeError): + return False + + def __getattr__(self, item): + try: + return getattr(self._target, item) + except AttributeError: + raise AttributeError( + f'"{self.__class__.__name__}" has no attribute "%s"' % item + ) + + def __repr__(self): + extra = "" + if self.target.url: + extra = f"[url: {self.target.url}]" + s = f"<{type(self).__name__} [{self.target_id}] [{self.type_}] {extra}>" + return s
+ +
+
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt
new file mode 100644
index 0000000..f9c29a2
--- /dev/null
+++ b/docs/_build/html/_sources/index.rst.txt
@@ -0,0 +1,103 @@
+
+
+##################
+NODRIVER
+##################
+
+
+**This package provides next-level webscraping and browser automation
+using a relatively simple interface.**
+
+* **This is the official successor of the** `Undetected-Chromedriver `_ **python package.**
+* **No more webdriver, no more selenium**
+
+Direct communication provides even better resistance against web application firewalls (WAFs), while
+performance gets a massive boost.
+This module is, contrary to undetected-chromedriver, fully asynchronous.
+
+What makes this package different from other known packages
+is the optimization to stay undetected by most anti-bot solutions.
+
+Another focus point is usability and quick prototyping, so expect a lot to work `-as is-`,
+with most method parameters having `best practice` defaults.
+Using 1 or 2 lines, this is up and running, providing a best practice config
+by default.
+
+While usability and convenience are important, it's also easy
+to fully customize everything using the entire array of
+`CDP `_ domains, methods and events available.
+
+
+Some features
+^^^^^^^^^^^^^^^^^^^^^^
+
+* A blazing fast undetected chrome (-ish) automation library
+
+* No chromedriver binary or Selenium dependency
+
+* This equals a bizarre performance increase and fewer detections!
+
+* Up and running in 1 line of code*
+
+* uses a fresh profile on each run, cleans up on exit
+
+* save and load cookies to file to not repeat tedious login steps
+
+* smart element lookup, by selector or text, including iframe content.
+  this can also be used as a wait condition for an element to appear, since it will retry
+  until found or until the timeout runs out.
+  single element lookup by text using tab.find() accepts a best_match flag, which will not
+  naively return the first match, but will match candidates by closest matching text length.
+
+* descriptive __repr__ for elements, which represents the element as html
+
+* utility function to convert a running undetected_chromedriver.Chrome instance
+  to a nodriver.Browser instance and continue from there
+
+* packed with helpers and utility methods for the most used and important operations
+
+..
+    * ```elem.text```
+    * ```elem.text_all```
+
+    * ```elem.parent.parent.parent.attrs```
+    * ```anchor_elem.href and anchor_elem['href']```
+    * ```anchor_elem.href = 'someotherthing'; await anchor_elem.save()```
+    * ```elem.children[-1].children[0].children[4].parent.parent```
+
+    * ```await html5video_element.record_video()```
+    * ```await html5video_element('pause')```
+    * ```await html5video_element.apply('''(el) => el.currentTime = 0''')```
+    * ```tab = await browser.get(url, new_tab=True)```
+    * ```tab_win = await browser.get(url, new_window=True)```
+    * ```first = await tab.find('search text')```
+    * ```best = await tab.find('search text', best_match=True)```
+    * ```all_results = await tab.find_all('search text')```
+    * ```first_submit_button = await tab.select(selector='button[type=submit]')```
+    * ```inputs_in_form = await tab.select_all('form input')```
+
+
+Quick start
+--------------
+.. toctree::
+    nodriver/quickstart
+
+
+Main objects
+--------------
+..
toctree:: + + nodriver/classes/browser + nodriver/classes/tab + nodriver/classes/element + nodriver/classes/others_and_helpers + + +CDP object +------------------ + +.. toctree:: + + nodriver/cdp + + diff --git a/docs/_build/html/_sources/nodriver/cdp.rst.txt b/docs/_build/html/_sources/nodriver/cdp.rst.txt new file mode 100644 index 0000000..6f83945 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp.rst.txt @@ -0,0 +1,8 @@ +CDP object +==================== + +.. toctree:: + :glob: + :maxdepth: 1 + + cdp/* \ No newline at end of file diff --git a/docs/_build/html/_sources/nodriver/cdp/accessibility.rst.txt b/docs/_build/html/_sources/nodriver/cdp/accessibility.rst.txt new file mode 100644 index 0000000..6d0b013 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/accessibility.rst.txt @@ -0,0 +1,113 @@ +Accessibility +============= + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.accessibility + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: AXNodeId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXValueType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXValueSourceType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXValueNativeSourceType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXValueSource + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXRelatedNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXProperty + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXValue + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXPropertyName + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AXNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_ax_node_and_ancestors + +.. autofunction:: get_child_ax_nodes + +.. autofunction:: get_full_ax_tree + +.. autofunction:: get_partial_ax_tree + +.. autofunction:: get_root_ax_node + +.. autofunction:: query_ax_tree + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: LoadComplete + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: NodesUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/animation.rst.txt b/docs/_build/html/_sources/nodriver/cdp/animation.rst.txt new file mode 100644 index 0000000..d4305bd --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/animation.rst.txt @@ -0,0 +1,102 @@ +Animation +========= + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.animation + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: Animation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ViewOrScrollTimeline + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AnimationEffect + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: KeyframesRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: KeyframeStyle + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_current_time + +.. autofunction:: get_playback_rate + +.. autofunction:: release_animations + +.. autofunction:: resolve_animation + +.. autofunction:: seek_animations + +.. autofunction:: set_paused + +.. autofunction:: set_playback_rate + +.. autofunction:: set_timing + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: AnimationCanceled + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AnimationCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AnimationStarted + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AnimationUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/audits.rst.txt b/docs/_build/html/_sources/nodriver/cdp/audits.rst.txt new file mode 100644 index 0000000..43cd853 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/audits.rst.txt @@ -0,0 +1,294 @@ +Audits +====== + +Audits domain allows investigation of page violations and possible improvements. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.audits + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: AffectedCookie + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AffectedRequest + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: AffectedFrame + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieExclusionReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieWarningReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieOperation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MixedContentResolutionStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MixedContentResourceType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MixedContentIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BlockedByResponseReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BlockedByResponseIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: HeavyAdResolutionStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: HeavyAdReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: HeavyAdIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContentSecurityPolicyViolationType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SourceCodeLocation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContentSecurityPolicyIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedArrayBufferIssueType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedArrayBufferIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LowTextContrastIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CorsIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingIssueType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedDictionaryError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: QuirksModeIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NavigatorUserAgentIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedDictionaryIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: GenericIssueErrorType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: GenericIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DeprecationIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BounceTrackingIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieDeprecationMetadataIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: ClientHintIssueReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FederatedAuthRequestIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FederatedAuthRequestIssueReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FederatedAuthUserInfoRequestIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FederatedAuthUserInfoRequestIssueReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ClientHintIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FailedRequestInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StyleSheetLoadingIssueReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StylesheetLoadingIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PropertyRuleIssueReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PropertyRuleIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InspectorIssueCode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InspectorIssueDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: IssueId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InspectorIssue + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: check_contrast + +.. autofunction:: check_forms_issues + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_encoded_response + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: IssueAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/autofill.rst.txt b/docs/_build/html/_sources/nodriver/cdp/autofill.rst.txt new file mode 100644 index 0000000..ca997f7 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/autofill.rst.txt @@ -0,0 +1,87 @@ +Autofill +======== + +Defines commands and events for Autofill. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.autofill + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: CreditCard + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AddressField + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AddressFields + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: Address + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AddressUI + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FillingStrategy + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FilledField + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: set_addresses + +.. autofunction:: trigger + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: AddressFormFilled + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/background_service.rst.txt b/docs/_build/html/_sources/nodriver/cdp/background_service.rst.txt new file mode 100644 index 0000000..3c2923e --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/background_service.rst.txt @@ -0,0 +1,72 @@ +BackgroundService +================= + +Defines events for background web platform features. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.background_service + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: ServiceName + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: EventMetadata + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BackgroundServiceEvent + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: clear_events + +.. autofunction:: set_recording + +.. autofunction:: start_observing + +.. autofunction:: stop_observing + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: RecordingStateChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: BackgroundServiceEventReceived + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/browser.rst.txt b/docs/_build/html/_sources/nodriver/cdp/browser.rst.txt new file mode 100644 index 0000000..d7c6119 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/browser.rst.txt @@ -0,0 +1,133 @@ +Browser +======= + +The Browser domain defines methods and events for browser managing. + +.. module:: nodriver.cdp.browser + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: BrowserContextID + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WindowID + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WindowState + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Bounds + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PermissionType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PermissionSetting + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PermissionDescriptor + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BrowserCommandId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Bucket + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Histogram + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: add_privacy_sandbox_enrollment_override + +.. autofunction:: cancel_download + +.. autofunction:: close + +.. autofunction:: crash + +.. autofunction:: crash_gpu_process + +.. autofunction:: execute_browser_command + +.. autofunction:: get_browser_command_line + +.. autofunction:: get_histogram + +.. autofunction:: get_histograms + +.. autofunction:: get_version + +.. autofunction:: get_window_bounds + +.. autofunction:: get_window_for_target + +.. autofunction:: grant_permissions + +.. autofunction:: reset_permissions + +.. autofunction:: set_dock_tile + +.. autofunction:: set_download_behavior + +.. autofunction:: set_permission + +.. autofunction:: set_window_bounds + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: DownloadWillBegin + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: DownloadProgress + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/cache_storage.rst.txt b/docs/_build/html/_sources/nodriver/cdp/cache_storage.rst.txt new file mode 100644 index 0000000..15ce3b7 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/cache_storage.rst.txt @@ -0,0 +1,75 @@ +CacheStorage +============ + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.cache_storage + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: CacheId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CachedResponseType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DataEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Cache + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Header + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CachedResponse + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: delete_cache + +.. autofunction:: delete_entry + +.. autofunction:: request_cache_names + +.. autofunction:: request_cached_response + +.. autofunction:: request_entries + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/cast.rst.txt b/docs/_build/html/_sources/nodriver/cdp/cast.rst.txt new file mode 100644 index 0000000..1320fb2 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/cast.rst.txt @@ -0,0 +1,67 @@ +Cast +==== + +A domain for interacting with Cast, Presentation API, and Remote Playback API +functionalities. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.cast + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: Sink + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: set_sink_to_use + +.. autofunction:: start_desktop_mirroring + +.. autofunction:: start_tab_mirroring + +.. autofunction:: stop_casting + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. 
Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: SinksUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: IssueUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/console.rst.txt b/docs/_build/html/_sources/nodriver/cdp/console.rst.txt new file mode 100644 index 0000000..95cadfd --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/console.rst.txt @@ -0,0 +1,53 @@ +Console +======= + +This domain is deprecated - use Runtime or Log instead. + +.. module:: nodriver.cdp.console + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: ConsoleMessage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: clear_messages + +.. autofunction:: disable + +.. autofunction:: enable + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: MessageAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/css.rst.txt b/docs/_build/html/_sources/nodriver/cdp/css.rst.txt new file mode 100644 index 0000000..4da17a8 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/css.rst.txt @@ -0,0 +1,321 @@ +CSS +=== + +This domain exposes CSS read/write operations. All CSS objects (stylesheets, rules, and styles) +have an associated `id` used in subsequent operations on the related object. Each object type has +a specific `id` structure, and those are not interchangeable between objects of different kinds. +CSS objects can be loaded using the `get*ForNode()` calls (which accept a DOM node id). A client +can also keep track of stylesheets via the `styleSheetAdded`/`styleSheetRemoved` events and +subsequently load the required stylesheet contents using the `getStyleSheet[Text]()` methods. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.css + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: StyleSheetId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StyleSheetOrigin + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PseudoElementMatches + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InheritedStyleEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InheritedPseudoElementMatches + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: RuleMatch + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Value + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Specificity + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SelectorList + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSStyleSheetHeader + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSRuleType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RuleUsage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SourceRange + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ShorthandEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSComputedStyleProperty + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSStyle + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSProperty + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSMedia + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MediaQuery + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MediaQueryExpression + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSContainerQuery + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSSupports + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSScope + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSLayer + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSLayerData + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlatformFontUsage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FontVariationAxis + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FontFace + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSTryRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSPositionFallbackRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSPositionTryRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSKeyframesRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSPropertyRegistration + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSFontPaletteValuesRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSPropertyRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSSKeyframeRule + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StyleDeclarationEdit + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. 
The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: add_rule + +.. autofunction:: collect_class_names + +.. autofunction:: create_style_sheet + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: force_pseudo_state + +.. autofunction:: get_background_colors + +.. autofunction:: get_computed_style_for_node + +.. autofunction:: get_inline_styles_for_node + +.. autofunction:: get_layers_for_node + +.. autofunction:: get_location_for_selector + +.. autofunction:: get_matched_styles_for_node + +.. autofunction:: get_media_queries + +.. autofunction:: get_platform_fonts_for_node + +.. autofunction:: get_style_sheet_text + +.. autofunction:: set_container_query_text + +.. autofunction:: set_effective_property_value_for_node + +.. autofunction:: set_keyframe_key + +.. autofunction:: set_local_fonts_enabled + +.. autofunction:: set_media_text + +.. autofunction:: set_property_rule_property_name + +.. autofunction:: set_rule_selector + +.. autofunction:: set_scope_text + +.. autofunction:: set_style_sheet_text + +.. autofunction:: set_style_texts + +.. autofunction:: set_supports_text + +.. autofunction:: start_rule_usage_tracking + +.. autofunction:: stop_rule_usage_tracking + +.. autofunction:: take_computed_style_updates + +.. autofunction:: take_coverage_delta + +.. autofunction:: track_computed_style_updates + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: FontsUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MediaQueryResultChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StyleSheetAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StyleSheetChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StyleSheetRemoved + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/database.rst.txt b/docs/_build/html/_sources/nodriver/cdp/database.rst.txt new file mode 100644 index 0000000..4f315f6 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/database.rst.txt @@ -0,0 +1,65 @@ +Database +======== + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.database + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: DatabaseId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Database + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Error + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. 
In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: execute_sql + +.. autofunction:: get_database_table_names + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: AddDatabase + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/debugger.rst.txt b/docs/_build/html/_sources/nodriver/cdp/debugger.rst.txt new file mode 100644 index 0000000..c66e9a0 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/debugger.rst.txt @@ -0,0 +1,187 @@ +Debugger +======== + +Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing +breakpoints, stepping through execution, exploring stack traces, etc. + +.. module:: nodriver.cdp.debugger + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: BreakpointId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CallFrameId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Location + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScriptPosition + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LocationRange + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CallFrame + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Scope + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SearchMatch + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BreakLocation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WasmDisassemblyChunk + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScriptLanguage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DebugSymbols + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: continue_to_location + +.. autofunction:: disable + +.. autofunction:: disassemble_wasm_module + +.. autofunction:: enable + +.. autofunction:: evaluate_on_call_frame + +.. autofunction:: get_possible_breakpoints + +.. autofunction:: get_script_source + +.. autofunction:: get_stack_trace + +.. autofunction:: get_wasm_bytecode + +.. autofunction:: next_wasm_disassembly_chunk + +.. autofunction:: pause + +.. autofunction:: pause_on_async_call + +.. autofunction:: remove_breakpoint + +.. autofunction:: restart_frame + +.. autofunction:: resume + +.. 
autofunction:: search_in_content + +.. autofunction:: set_async_call_stack_depth + +.. autofunction:: set_blackbox_patterns + +.. autofunction:: set_blackboxed_ranges + +.. autofunction:: set_breakpoint + +.. autofunction:: set_breakpoint_by_url + +.. autofunction:: set_breakpoint_on_function_call + +.. autofunction:: set_breakpoints_active + +.. autofunction:: set_instrumentation_breakpoint + +.. autofunction:: set_pause_on_exceptions + +.. autofunction:: set_return_value + +.. autofunction:: set_script_source + +.. autofunction:: set_skip_all_pauses + +.. autofunction:: set_variable_value + +.. autofunction:: step_into + +.. autofunction:: step_out + +.. autofunction:: step_over + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: BreakpointResolved + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Paused + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Resumed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScriptFailedToParse + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScriptParsed + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/device_access.rst.txt b/docs/_build/html/_sources/nodriver/cdp/device_access.rst.txt new file mode 100644 index 0000000..e311486 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/device_access.rst.txt @@ -0,0 +1,65 @@ +DeviceAccess +============ + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.device_access + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: RequestId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DeviceId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PromptDevice + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: cancel_prompt + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: select_prompt + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: DeviceRequestPrompted + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/device_orientation.rst.txt b/docs/_build/html/_sources/nodriver/cdp/device_orientation.rst.txt new file mode 100644 index 0000000..2e644ce --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/device_orientation.rst.txt @@ -0,0 +1,36 @@ +DeviceOrientation +================= + +*This CDP domain is experimental.* + +.. 
module:: nodriver.cdp.device_orientation + +* Types_ +* Commands_ +* Events_ + +Types +----- + +*There are no types in this module.* + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: clear_device_orientation_override + +.. autofunction:: set_device_orientation_override + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/dom.rst.txt b/docs/_build/html/_sources/nodriver/cdp/dom.rst.txt new file mode 100644 index 0000000..c5c468e --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/dom.rst.txt @@ -0,0 +1,300 @@ +DOM +=== + +This domain exposes DOM read/write operations. Each DOM Node is represented with its mirror object +that has an `id`. This `id` can be used to get additional information on the Node, resolve it into +the JavaScript object wrapper, etc. It is important that client receives DOM events only for the +nodes that are known to the client. Backend keeps track of the nodes that were sent to the client +and never sends the same node twice. It is client's responsibility to collect information about +the nodes that were sent to the client. Note that `iframe` owner elements will return +corresponding document elements as their child nodes. + +.. module:: nodriver.cdp.dom + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: NodeId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BackendNodeId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BackendNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PseudoType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ShadowRootType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CompatibilityMode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PhysicalAxes + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LogicalAxes + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScrollOrientation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Node + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RGBA + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Quad + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BoxModel + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ShapeOutsideInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Rect + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: CSSComputedStyleProperty + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: collect_class_names_from_subtree + +.. autofunction:: copy_to + +.. autofunction:: describe_node + +.. autofunction:: disable + +.. autofunction:: discard_search_results + +.. autofunction:: enable + +.. autofunction:: focus + +.. autofunction:: get_anchor_element + +.. autofunction:: get_attributes + +.. autofunction:: get_box_model + +.. autofunction:: get_container_for_node + +.. autofunction:: get_content_quads + +.. autofunction:: get_document + +.. autofunction:: get_element_by_relation + +.. autofunction:: get_file_info + +.. autofunction:: get_flattened_document + +.. autofunction:: get_frame_owner + +.. autofunction:: get_node_for_location + +.. autofunction:: get_node_stack_traces + +.. autofunction:: get_nodes_for_subtree_by_style + +.. autofunction:: get_outer_html + +.. autofunction:: get_querying_descendants_for_container + +.. autofunction:: get_relayout_boundary + +.. autofunction:: get_search_results + +.. autofunction:: get_top_layer_elements + +.. autofunction:: hide_highlight + +.. autofunction:: highlight_node + +.. autofunction:: highlight_rect + +.. autofunction:: mark_undoable_state + +.. autofunction:: move_to + +.. autofunction:: perform_search + +.. autofunction:: push_node_by_path_to_frontend + +.. autofunction:: push_nodes_by_backend_ids_to_frontend + +.. autofunction:: query_selector + +.. autofunction:: query_selector_all + +.. autofunction:: redo + +.. autofunction:: remove_attribute + +.. autofunction:: remove_node + +.. autofunction:: request_child_nodes + +.. autofunction:: request_node + +.. autofunction:: resolve_node + +.. autofunction:: scroll_into_view_if_needed + +.. autofunction:: set_attribute_value + +.. autofunction:: set_attributes_as_text + +.. autofunction:: set_file_input_files + +.. autofunction:: set_inspected_node + +.. autofunction:: set_node_name + +.. autofunction:: set_node_stack_traces_enabled + +.. autofunction:: set_node_value + +.. autofunction:: set_outer_html + +.. autofunction:: undo + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: AttributeModified + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributeRemoved + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CharacterDataModified + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ChildNodeCountUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ChildNodeInserted + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ChildNodeRemoved + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DistributedNodesUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: DocumentUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InlineStyleInvalidated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PseudoElementAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TopLayerElementsUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PseudoElementRemoved + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SetChildNodes + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ShadowRootPopped + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ShadowRootPushed + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/dom_debugger.rst.txt b/docs/_build/html/_sources/nodriver/cdp/dom_debugger.rst.txt new file mode 100644 index 0000000..aedb6af --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/dom_debugger.rst.txt @@ -0,0 +1,71 @@ +DOMDebugger +=========== + +DOM debugging allows setting breakpoints on particular DOM operations and events. JavaScript +execution will stop on these operations as if there was a regular breakpoint set. + +.. module:: nodriver.cdp.dom_debugger + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: DOMBreakpointType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CSPViolationType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: EventListener + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: get_event_listeners + +.. autofunction:: remove_dom_breakpoint + +.. autofunction:: remove_event_listener_breakpoint + +.. autofunction:: remove_instrumentation_breakpoint + +.. autofunction:: remove_xhr_breakpoint + +.. autofunction:: set_break_on_csp_violation + +.. autofunction:: set_dom_breakpoint + +.. autofunction:: set_event_listener_breakpoint + +.. autofunction:: set_instrumentation_breakpoint + +.. autofunction:: set_xhr_breakpoint + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/dom_snapshot.rst.txt b/docs/_build/html/_sources/nodriver/cdp/dom_snapshot.rst.txt new file mode 100644 index 0000000..2379dca --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/dom_snapshot.rst.txt @@ -0,0 +1,120 @@ +DOMSnapshot +=========== + +This domain facilitates obtaining document snapshots with DOM, layout, and style information. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.dom_snapshot + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. 
Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: DOMNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InlineTextBox + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LayoutTreeNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ComputedStyle + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NameValue + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StringIndex + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ArrayOfStrings + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RareStringData + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RareBooleanData + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RareIntegerData + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Rectangle + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DocumentSnapshot + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NodeTreeSnapshot + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LayoutTreeSnapshot + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TextBoxSnapshot + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: capture_snapshot + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_snapshot + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/dom_storage.rst.txt b/docs/_build/html/_sources/nodriver/cdp/dom_storage.rst.txt new file mode 100644 index 0000000..c631770 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/dom_storage.rst.txt @@ -0,0 +1,86 @@ +DOMStorage +========== + +Query and modify DOM storage. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.dom_storage + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: SerializedStorageKey + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StorageId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Item + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. 
In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: clear + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_dom_storage_items + +.. autofunction:: remove_dom_storage_item + +.. autofunction:: set_dom_storage_item + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: DomStorageItemAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DomStorageItemRemoved + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DomStorageItemUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DomStorageItemsCleared + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/emulation.rst.txt b/docs/_build/html/_sources/nodriver/cdp/emulation.rst.txt new file mode 100644 index 0000000..868eaa2 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/emulation.rst.txt @@ -0,0 +1,180 @@ +Emulation +========= + +This domain emulates different environments for the page. + +.. module:: nodriver.cdp.emulation + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: ScreenOrientation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DisplayFeature + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DevicePosture + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MediaFeature + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: VirtualTimePolicy + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: UserAgentBrandVersion + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: UserAgentMetadata + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SensorType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SensorMetadata + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SensorReadingSingle + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SensorReadingXYZ + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SensorReadingQuaternion + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SensorReading + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DisabledImageType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. 
+ +.. autofunction:: can_emulate + +.. autofunction:: clear_device_metrics_override + +.. autofunction:: clear_device_posture_override + +.. autofunction:: clear_geolocation_override + +.. autofunction:: clear_idle_override + +.. autofunction:: get_overridden_sensor_information + +.. autofunction:: reset_page_scale_factor + +.. autofunction:: set_auto_dark_mode_override + +.. autofunction:: set_automation_override + +.. autofunction:: set_cpu_throttling_rate + +.. autofunction:: set_default_background_color_override + +.. autofunction:: set_device_metrics_override + +.. autofunction:: set_device_posture_override + +.. autofunction:: set_disabled_image_types + +.. autofunction:: set_document_cookie_disabled + +.. autofunction:: set_emit_touch_events_for_mouse + +.. autofunction:: set_emulated_media + +.. autofunction:: set_emulated_vision_deficiency + +.. autofunction:: set_focus_emulation_enabled + +.. autofunction:: set_geolocation_override + +.. autofunction:: set_hardware_concurrency_override + +.. autofunction:: set_idle_override + +.. autofunction:: set_locale_override + +.. autofunction:: set_navigator_overrides + +.. autofunction:: set_page_scale_factor + +.. autofunction:: set_script_execution_disabled + +.. autofunction:: set_scrollbars_hidden + +.. autofunction:: set_sensor_override_enabled + +.. autofunction:: set_sensor_override_readings + +.. autofunction:: set_timezone_override + +.. autofunction:: set_touch_emulation_enabled + +.. autofunction:: set_user_agent_override + +.. autofunction:: set_virtual_time_policy + +.. autofunction:: set_visible_size + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: VirtualTimeBudgetExpired + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/event_breakpoints.rst.txt b/docs/_build/html/_sources/nodriver/cdp/event_breakpoints.rst.txt new file mode 100644 index 0000000..0a1446b --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/event_breakpoints.rst.txt @@ -0,0 +1,42 @@ +EventBreakpoints +================ + +EventBreakpoints permits setting JavaScript breakpoints on operations and events +occurring in native code invoked from JavaScript. Once breakpoint is hit, it is +reported through Debugger domain, similarly to regular breakpoints being hit. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.event_breakpoints + +* Types_ +* Commands_ +* Events_ + +Types +----- + +*There are no types in this module.* + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: remove_instrumentation_breakpoint + +.. 
autofunction:: set_instrumentation_breakpoint + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/extensions.rst.txt b/docs/_build/html/_sources/nodriver/cdp/extensions.rst.txt new file mode 100644 index 0000000..6c1011e --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/extensions.rst.txt @@ -0,0 +1,38 @@ +Extensions +========== + +Defines commands and events for browser extensions. Available if the client +is connected using the --remote-debugging-pipe flag and +the --enable-unsafe-extension-debugging flag is set. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.extensions + +* Types_ +* Commands_ +* Events_ + +Types +----- + +*There are no types in this module.* + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: load_unpacked + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/fed_cm.rst.txt b/docs/_build/html/_sources/nodriver/cdp/fed_cm.rst.txt new file mode 100644 index 0000000..51ffdac --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/fed_cm.rst.txt @@ -0,0 +1,88 @@ +FedCm +===== + +This domain allows interacting with the FedCM dialog. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.fed_cm + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: LoginState + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DialogType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DialogButton + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AccountUrlType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Account + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: click_dialog_button + +.. autofunction:: disable + +.. autofunction:: dismiss_dialog + +.. autofunction:: enable + +.. autofunction:: open_url + +.. autofunction:: reset_cooldown + +.. autofunction:: select_account + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: DialogShown + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: DialogClosed + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/fetch.rst.txt b/docs/_build/html/_sources/nodriver/cdp/fetch.rst.txt new file mode 100644 index 0000000..3b4eb74 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/fetch.rst.txt @@ -0,0 +1,95 @@ +Fetch +===== + +A domain for letting clients substitute browser's network layer with client code. + +.. module:: nodriver.cdp.fetch + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: RequestId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RequestStage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RequestPattern + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: HeaderEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AuthChallenge + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AuthChallengeResponse + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: continue_request + +.. autofunction:: continue_response + +.. autofunction:: continue_with_auth + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: fail_request + +.. autofunction:: fulfill_request + +.. autofunction:: get_response_body + +.. autofunction:: take_response_body_as_stream + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: RequestPaused + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AuthRequired + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/headless_experimental.rst.txt b/docs/_build/html/_sources/nodriver/cdp/headless_experimental.rst.txt new file mode 100644 index 0000000..914a7e6 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/headless_experimental.rst.txt @@ -0,0 +1,48 @@ +HeadlessExperimental +==================== + +This domain provides experimental commands only supported in headless mode. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.headless_experimental + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: ScreenshotParams + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. 
The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: begin_frame + +.. autofunction:: disable + +.. autofunction:: enable + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/heap_profiler.rst.txt b/docs/_build/html/_sources/nodriver/cdp/heap_profiler.rst.txt new file mode 100644 index 0000000..9691798 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/heap_profiler.rst.txt @@ -0,0 +1,106 @@ +HeapProfiler +============ + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.heap_profiler + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: HeapSnapshotObjectId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SamplingHeapProfileNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SamplingHeapProfileSample + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SamplingHeapProfile + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: add_inspected_heap_object + +.. autofunction:: collect_garbage + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_heap_object_id + +.. autofunction:: get_object_by_heap_object_id + +.. autofunction:: get_sampling_profile + +.. autofunction:: start_sampling + +.. autofunction:: start_tracking_heap_objects + +.. autofunction:: stop_sampling + +.. autofunction:: stop_tracking_heap_objects + +.. autofunction:: take_heap_snapshot + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: AddHeapSnapshotChunk + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: HeapStatsUpdate + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LastSeenObjectId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReportHeapSnapshotProgress + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: ResetProfiles + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/indexed_db.rst.txt b/docs/_build/html/_sources/nodriver/cdp/indexed_db.rst.txt new file mode 100644 index 0000000..00b86de --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/indexed_db.rst.txt @@ -0,0 +1,88 @@ +IndexedDB +========= + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.indexed_db + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: DatabaseWithObjectStores + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ObjectStore + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ObjectStoreIndex + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Key + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: KeyRange + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DataEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: KeyPath + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: clear_object_store + +.. autofunction:: delete_database + +.. autofunction:: delete_object_store_entries + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_metadata + +.. autofunction:: request_data + +.. autofunction:: request_database + +.. autofunction:: request_database_names + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/input_.rst.txt b/docs/_build/html/_sources/nodriver/cdp/input_.rst.txt new file mode 100644 index 0000000..4af74a9 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/input_.rst.txt @@ -0,0 +1,96 @@ +Input +===== + +.. module:: nodriver.cdp.input_ + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: TouchPoint + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: GestureSourceType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MouseButton + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TimeSinceEpoch + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DragDataItem + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DragData + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. 
The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: cancel_dragging + +.. autofunction:: dispatch_drag_event + +.. autofunction:: dispatch_key_event + +.. autofunction:: dispatch_mouse_event + +.. autofunction:: dispatch_touch_event + +.. autofunction:: emulate_touch_from_mouse_event + +.. autofunction:: ime_set_composition + +.. autofunction:: insert_text + +.. autofunction:: set_ignore_input_events + +.. autofunction:: set_intercept_drags + +.. autofunction:: synthesize_pinch_gesture + +.. autofunction:: synthesize_scroll_gesture + +.. autofunction:: synthesize_tap_gesture + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: DragIntercepted + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/inspector.rst.txt b/docs/_build/html/_sources/nodriver/cdp/inspector.rst.txt new file mode 100644 index 0000000..e659aa0 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/inspector.rst.txt @@ -0,0 +1,53 @@ +Inspector +========= + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.inspector + +* Types_ +* Commands_ +* Events_ + +Types +----- + +*There are no types in this module.* + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: Detached + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TargetCrashed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TargetReloadedAfterCrash + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/io.rst.txt b/docs/_build/html/_sources/nodriver/cdp/io.rst.txt new file mode 100644 index 0000000..f87ae91 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/io.rst.txt @@ -0,0 +1,46 @@ +IO +== + +Input/Output operations for streams produced by DevTools. + +.. module:: nodriver.cdp.io + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: StreamHandle + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. 
The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: close + +.. autofunction:: read + +.. autofunction:: resolve_blob + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/layer_tree.rst.txt b/docs/_build/html/_sources/nodriver/cdp/layer_tree.rst.txt new file mode 100644 index 0000000..1448ebe --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/layer_tree.rst.txt @@ -0,0 +1,100 @@ +LayerTree +========= + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.layer_tree + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: LayerId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SnapshotId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScrollRect + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StickyPositionConstraint + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PictureTile + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Layer + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PaintProfile + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: compositing_reasons + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: load_snapshot + +.. autofunction:: make_snapshot + +.. autofunction:: profile_snapshot + +.. autofunction:: release_snapshot + +.. autofunction:: replay_snapshot + +.. autofunction:: snapshot_command_log + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: LayerPainted + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LayerTreeDidChange + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/log.rst.txt b/docs/_build/html/_sources/nodriver/cdp/log.rst.txt new file mode 100644 index 0000000..20a587a --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/log.rst.txt @@ -0,0 +1,62 @@ +Log +=== + +Provides access to log entries. + +.. module:: nodriver.cdp.log + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. 
Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: LogEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ViolationSetting + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: clear + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: start_violations_report + +.. autofunction:: stop_violations_report + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: EntryAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/media.rst.txt b/docs/_build/html/_sources/nodriver/cdp/media.rst.txt new file mode 100644 index 0000000..a12820c --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/media.rst.txt @@ -0,0 +1,103 @@ +Media +===== + +This domain allows detailed inspection of media elements + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.media + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: PlayerId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Timestamp + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlayerMessage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlayerProperty + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlayerEvent + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlayerErrorSourceLocation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlayerError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: PlayerPropertiesChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlayerEventsAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: PlayerMessagesLogged + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlayerErrorsRaised + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PlayersCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/memory.rst.txt b/docs/_build/html/_sources/nodriver/cdp/memory.rst.txt new file mode 100644 index 0000000..cb21e6c --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/memory.rst.txt @@ -0,0 +1,75 @@ +Memory +====== + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.memory + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: PressureLevel + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SamplingProfileNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SamplingProfile + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Module + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: forcibly_purge_java_script_memory + +.. autofunction:: get_all_time_sampling_profile + +.. autofunction:: get_browser_sampling_profile + +.. autofunction:: get_dom_counters + +.. autofunction:: get_sampling_profile + +.. autofunction:: prepare_for_leak_detection + +.. autofunction:: set_pressure_notifications_suppressed + +.. autofunction:: simulate_pressure_notification + +.. autofunction:: start_sampling + +.. autofunction:: stop_sampling + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/network.rst.txt b/docs/_build/html/_sources/nodriver/cdp/network.rst.txt new file mode 100644 index 0000000..30eecbb --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/network.rst.txt @@ -0,0 +1,616 @@ +Network +======= + +Network domain allows tracking network activities of the page. It exposes information about http, +file, data and other requests and responses, their headers, bodies, timing, etc. + +.. module:: nodriver.cdp.network + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: ResourceType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LoaderId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RequestId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterceptionId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ErrorReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: TimeSinceEpoch + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MonotonicTime + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Headers + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ConnectionType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieSameSite + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookiePriority + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieSourceScheme + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ResourceTiming + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ResourcePriority + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PostDataEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Request + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SignedCertificateTimestamp + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SecurityDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CertificateTransparencyCompliance + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BlockedReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CorsError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CorsErrorStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ServiceWorkerResponseSource + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TrustTokenParams + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TrustTokenOperationType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AlternateProtocolUsage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ServiceWorkerRouterSource + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ServiceWorkerRouterInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Response + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketRequest + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketResponse + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketFrame + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CachedResource + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Initiator + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookiePartitionKey + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Cookie + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SetCookieBlockedReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieBlockedReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieExemptionReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: BlockedSetCookieWithReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExemptedSetCookieWithReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AssociatedCookie + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CookieParam + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AuthChallenge + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AuthChallengeResponse + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterceptionStage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RequestPattern + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SignedExchangeSignature + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SignedExchangeHeader + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SignedExchangeErrorField + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SignedExchangeError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SignedExchangeInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContentEncoding + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PrivateNetworkRequestPolicy + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: IPAddressSpace + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ConnectTiming + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ClientSecurityState + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CrossOriginOpenerPolicyValue + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CrossOriginOpenerPolicyStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CrossOriginEmbedderPolicyValue + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CrossOriginEmbedderPolicyStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContentSecurityPolicySource + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContentSecurityPolicyStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SecurityIsolationStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReportStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReportId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReportingApiReport + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReportingApiEndpoint + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LoadNetworkResourcePageResult + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LoadNetworkResourceOptions + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. 
The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: can_clear_browser_cache + +.. autofunction:: can_clear_browser_cookies + +.. autofunction:: can_emulate_network_conditions + +.. autofunction:: clear_accepted_encodings_override + +.. autofunction:: clear_browser_cache + +.. autofunction:: clear_browser_cookies + +.. autofunction:: continue_intercepted_request + +.. autofunction:: delete_cookies + +.. autofunction:: disable + +.. autofunction:: emulate_network_conditions + +.. autofunction:: enable + +.. autofunction:: enable_reporting_api + +.. autofunction:: get_all_cookies + +.. autofunction:: get_certificate + +.. autofunction:: get_cookies + +.. autofunction:: get_request_post_data + +.. autofunction:: get_response_body + +.. autofunction:: get_response_body_for_interception + +.. autofunction:: get_security_isolation_status + +.. autofunction:: load_network_resource + +.. autofunction:: replay_xhr + +.. autofunction:: search_in_response_body + +.. autofunction:: set_accepted_encodings + +.. autofunction:: set_attach_debug_stack + +.. autofunction:: set_blocked_ur_ls + +.. autofunction:: set_bypass_service_worker + +.. autofunction:: set_cache_disabled + +.. autofunction:: set_cookie + +.. autofunction:: set_cookies + +.. autofunction:: set_extra_http_headers + +.. autofunction:: set_request_interception + +.. autofunction:: set_user_agent_override + +.. autofunction:: stream_resource_content + +.. autofunction:: take_response_body_for_interception_as_stream + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: DataReceived + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: EventSourceMessageReceived + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LoadingFailed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LoadingFinished + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RequestIntercepted + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RequestServedFromCache + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RequestWillBeSent + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ResourceChangedPriority + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SignedExchangeReceived + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ResponseReceived + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketClosed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketFrameError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketFrameReceived + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: WebSocketFrameSent + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketHandshakeResponseReceived + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebSocketWillSendHandshakeRequest + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebTransportCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebTransportConnectionEstablished + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebTransportClosed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RequestWillBeSentExtraInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ResponseReceivedExtraInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ResponseReceivedEarlyHints + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TrustTokenOperationDone + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PolicyUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SubresourceWebBundleMetadataReceived + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SubresourceWebBundleMetadataError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SubresourceWebBundleInnerResponseParsed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SubresourceWebBundleInnerResponseError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReportingApiReportAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReportingApiReportUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReportingApiEndpointsChangedForOrigin + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/overlay.rst.txt b/docs/_build/html/_sources/nodriver/cdp/overlay.rst.txt new file mode 100644 index 0000000..7e86745 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/overlay.rst.txt @@ -0,0 +1,217 @@ +Overlay +======= + +This domain provides various functionality related to drawing atop the inspected page. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.overlay + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: SourceOrderConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: GridHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FlexContainerHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FlexItemHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LineStyle + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BoxStyle + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContrastAlgorithm + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: HighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ColorFormat + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: GridNodeHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FlexNodeHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScrollSnapContainerHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScrollSnapHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: HingeConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WindowControlsOverlayConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContainerQueryHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContainerQueryContainerHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: IsolatedElementHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: IsolationModeHighlightConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InspectMode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_grid_highlight_objects_for_test + +.. autofunction:: get_highlight_object_for_test + +.. autofunction:: get_source_order_highlight_object_for_test + +.. autofunction:: hide_highlight + +.. autofunction:: highlight_frame + +.. autofunction:: highlight_node + +.. autofunction:: highlight_quad + +.. autofunction:: highlight_rect + +.. autofunction:: highlight_source_order + +.. autofunction:: set_inspect_mode + +.. autofunction:: set_paused_in_debugger_message + +.. autofunction:: set_show_ad_highlights + +.. autofunction:: set_show_container_query_overlays + +.. autofunction:: set_show_debug_borders + +.. autofunction:: set_show_flex_overlays + +.. autofunction:: set_show_fps_counter + +.. autofunction:: set_show_grid_overlays + +.. autofunction:: set_show_hinge + +.. autofunction:: set_show_hit_test_borders + +.. autofunction:: set_show_isolated_elements + +.. autofunction:: set_show_layout_shift_regions + +.. autofunction:: set_show_paint_rects + +.. autofunction:: set_show_scroll_bottleneck_rects + +.. autofunction:: set_show_scroll_snap_overlays + +.. autofunction:: set_show_viewport_size_on_resize + +.. autofunction:: set_show_web_vitals + +.. autofunction:: set_show_window_controls_overlay + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: InspectNodeRequested + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: NodeHighlightRequested + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScreenshotRequested + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InspectModeCanceled + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/page.rst.txt b/docs/_build/html/_sources/nodriver/cdp/page.rst.txt new file mode 100644 index 0000000..9662191 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/page.rst.txt @@ -0,0 +1,582 @@ +Page +==== + +Actions and events related to the inspected page belong to the page domain. + +.. module:: nodriver.cdp.page + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: FrameId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AdFrameType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AdFrameExplanation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AdFrameStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AdScriptId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SecureContextType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CrossOriginIsolatedContextType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: GatedAPIFeatures + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PermissionsPolicyFeature + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PermissionsPolicyBlockReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PermissionsPolicyBlockLocator + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PermissionsPolicyFeatureState + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: OriginTrialTokenStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: OriginTrialStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: OriginTrialUsageRestriction + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: OriginTrialToken + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: OriginTrialTokenWithStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: OriginTrial + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Frame + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameResource + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameResourceTree + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameTree + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScriptIdentifier + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TransitionType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: NavigationEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScreencastFrameMetadata + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DialogType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AppManifestError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AppManifestParsedProperties + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LayoutViewport + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: VisualViewport + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Viewport + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FontFamilies + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScriptFontFamilies + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FontSizes + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ClientNavigationReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ClientNavigationDisposition + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InstallabilityErrorArgument + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InstallabilityError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReferrerPolicy + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CompilationCacheParams + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FileFilter + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FileHandler + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ImageResource + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LaunchHandler + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ProtocolHandler + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RelatedApplication + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScopeExtension + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Screenshot + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ShareTarget + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Shortcut + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WebAppManifest + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AutoResponseMode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NavigationType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BackForwardCacheNotRestoredReason + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BackForwardCacheNotRestoredReasonType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BackForwardCacheBlockingDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: BackForwardCacheNotRestoredExplanation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BackForwardCacheNotRestoredExplanationTree + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: add_compilation_cache + +.. autofunction:: add_script_to_evaluate_on_load + +.. autofunction:: add_script_to_evaluate_on_new_document + +.. autofunction:: bring_to_front + +.. autofunction:: capture_screenshot + +.. autofunction:: capture_snapshot + +.. autofunction:: clear_compilation_cache + +.. autofunction:: clear_device_metrics_override + +.. autofunction:: clear_device_orientation_override + +.. autofunction:: clear_geolocation_override + +.. autofunction:: close + +.. autofunction:: crash + +.. autofunction:: create_isolated_world + +.. autofunction:: delete_cookie + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: generate_test_report + +.. autofunction:: get_ad_script_id + +.. autofunction:: get_app_id + +.. autofunction:: get_app_manifest + +.. autofunction:: get_frame_tree + +.. autofunction:: get_installability_errors + +.. autofunction:: get_layout_metrics + +.. autofunction:: get_manifest_icons + +.. autofunction:: get_navigation_history + +.. autofunction:: get_origin_trials + +.. autofunction:: get_permissions_policy_state + +.. autofunction:: get_resource_content + +.. autofunction:: get_resource_tree + +.. autofunction:: handle_java_script_dialog + +.. autofunction:: navigate + +.. autofunction:: navigate_to_history_entry + +.. autofunction:: print_to_pdf + +.. autofunction:: produce_compilation_cache + +.. autofunction:: reload + +.. autofunction:: remove_script_to_evaluate_on_load + +.. autofunction:: remove_script_to_evaluate_on_new_document + +.. autofunction:: reset_navigation_history + +.. autofunction:: screencast_frame_ack + +.. autofunction:: search_in_resource + +.. autofunction:: set_ad_blocking_enabled + +.. autofunction:: set_bypass_csp + +.. autofunction:: set_device_metrics_override + +.. autofunction:: set_device_orientation_override + +.. autofunction:: set_document_content + +.. autofunction:: set_download_behavior + +.. autofunction:: set_font_families + +.. autofunction:: set_font_sizes + +.. autofunction:: set_geolocation_override + +.. autofunction:: set_intercept_file_chooser_dialog + +.. autofunction:: set_lifecycle_events_enabled + +.. autofunction:: set_prerendering_allowed + +.. autofunction:: set_rph_registration_mode + +.. autofunction:: set_spc_transaction_mode + +.. autofunction:: set_touch_emulation_enabled + +.. autofunction:: set_web_lifecycle_state + +.. autofunction:: start_screencast + +.. autofunction:: stop_loading + +.. autofunction:: stop_screencast + +.. autofunction:: wait_for_debugger + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: DomContentEventFired + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: FileChooserOpened + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameAttached + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameClearedScheduledNavigation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameDetached + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameNavigated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DocumentOpened + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameResized + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameRequestedNavigation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameScheduledNavigation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameStartedLoading + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FrameStoppedLoading + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DownloadWillBegin + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DownloadProgress + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterstitialHidden + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterstitialShown + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: JavascriptDialogClosed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: JavascriptDialogOpening + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LifecycleEvent + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BackForwardCacheNotUsed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LoadEventFired + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NavigatedWithinDocument + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScreencastFrame + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ScreencastVisibilityChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WindowOpen + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CompilationCacheProduced + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/performance.rst.txt b/docs/_build/html/_sources/nodriver/cdp/performance.rst.txt new file mode 100644 index 0000000..0ccaba7 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/performance.rst.txt @@ -0,0 +1,53 @@ +Performance +=========== + +.. module:: nodriver.cdp.performance + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: Metric + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. 
The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_metrics + +.. autofunction:: set_time_domain + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: Metrics + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/performance_timeline.rst.txt b/docs/_build/html/_sources/nodriver/cdp/performance_timeline.rst.txt new file mode 100644 index 0000000..b379172 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/performance_timeline.rst.txt @@ -0,0 +1,67 @@ +PerformanceTimeline +=================== + +Reporting of performance timeline events, as specified in +https://w3c.github.io/performance-timeline/#dom-performanceobserver. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.performance_timeline + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: LargestContentfulPaint + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LayoutShiftAttribution + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: LayoutShift + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TimelineEvent + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: enable + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: TimelineEventAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/preload.rst.txt b/docs/_build/html/_sources/nodriver/cdp/preload.rst.txt new file mode 100644 index 0000000..ac3d989 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/preload.rst.txt @@ -0,0 +1,126 @@ +Preload +======= + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.preload + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: RuleSetId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: RuleSet + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RuleSetErrorType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SpeculationAction + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SpeculationTargetHint + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PreloadingAttemptKey + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PreloadingAttemptSource + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PrerenderFinalStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PreloadingStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PrefetchStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PrerenderMismatchedHeaders + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: RuleSetUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RuleSetRemoved + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PreloadEnabledStateUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PrefetchStatusUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PrerenderStatusUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PreloadingAttemptSourcesUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/profiler.rst.txt b/docs/_build/html/_sources/nodriver/cdp/profiler.rst.txt new file mode 100644 index 0000000..0333b49 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/profiler.rst.txt @@ -0,0 +1,98 @@ +Profiler +======== + +.. module:: nodriver.cdp.profiler + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: ProfileNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Profile + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PositionTickInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CoverageRange + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FunctionCoverage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: ScriptCoverage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_best_effort_coverage + +.. autofunction:: set_sampling_interval + +.. autofunction:: start + +.. autofunction:: start_precise_coverage + +.. autofunction:: stop + +.. autofunction:: stop_precise_coverage + +.. autofunction:: take_precise_coverage + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: ConsoleProfileFinished + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ConsoleProfileStarted + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PreciseCoverageDeltaUpdate + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/pwa.rst.txt b/docs/_build/html/_sources/nodriver/cdp/pwa.rst.txt new file mode 100644 index 0000000..eb2d925 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/pwa.rst.txt @@ -0,0 +1,66 @@ +PWA +=== + +This domain allows interacting with the browser to control PWAs. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.pwa + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: FileHandlerAccept + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FileHandler + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DisplayMode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: change_app_user_settings + +.. autofunction:: get_os_app_state + +.. autofunction:: install + +.. autofunction:: launch + +.. autofunction:: launch_files_in_app + +.. autofunction:: open_current_page_in_app + +.. autofunction:: uninstall + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/runtime.rst.txt b/docs/_build/html/_sources/nodriver/cdp/runtime.rst.txt new file mode 100644 index 0000000..bd30b29 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/runtime.rst.txt @@ -0,0 +1,242 @@ +Runtime +======= + +Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. 
+Evaluation results are returned as mirror object that expose object type, string representation +and unique identifier that can be used for further object reference. Original objects are +maintained in memory unless they are either explicitly released or are released along with the +other objects in their object group. + +.. module:: nodriver.cdp.runtime + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: ScriptId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SerializationOptions + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DeepSerializedValue + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RemoteObjectId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: UnserializableValue + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RemoteObject + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CustomPreview + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ObjectPreview + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PropertyPreview + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: EntryPreview + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PropertyDescriptor + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InternalPropertyDescriptor + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: PrivatePropertyDescriptor + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CallArgument + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExecutionContextId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExecutionContextDescription + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExceptionDetails + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Timestamp + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TimeDelta + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CallFrame + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StackTrace + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: UniqueDebuggerId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StackTraceId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: add_binding + +.. autofunction:: await_promise + +.. autofunction:: call_function_on + +.. 
autofunction:: compile_script + +.. autofunction:: disable + +.. autofunction:: discard_console_entries + +.. autofunction:: enable + +.. autofunction:: evaluate + +.. autofunction:: get_exception_details + +.. autofunction:: get_heap_usage + +.. autofunction:: get_isolate_id + +.. autofunction:: get_properties + +.. autofunction:: global_lexical_scope_names + +.. autofunction:: query_objects + +.. autofunction:: release_object + +.. autofunction:: release_object_group + +.. autofunction:: remove_binding + +.. autofunction:: run_if_waiting_for_debugger + +.. autofunction:: run_script + +.. autofunction:: set_async_call_stack_depth + +.. autofunction:: set_custom_object_formatter_enabled + +.. autofunction:: set_max_call_stack_size_to_capture + +.. autofunction:: terminate_execution + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: BindingCalled + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ConsoleAPICalled + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExceptionRevoked + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExceptionThrown + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExecutionContextCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExecutionContextDestroyed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ExecutionContextsCleared + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InspectRequested + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/schema.rst.txt b/docs/_build/html/_sources/nodriver/cdp/schema.rst.txt new file mode 100644 index 0000000..f69a604 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/schema.rst.txt @@ -0,0 +1,42 @@ +Schema +====== + +This domain is deprecated. + +.. module:: nodriver.cdp.schema + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: Domain + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: get_domains + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/security.rst.txt b/docs/_build/html/_sources/nodriver/cdp/security.rst.txt new file mode 100644 index 0000000..34d4d4e --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/security.rst.txt @@ -0,0 +1,112 @@ +Security +======== + +Security + +.. module:: nodriver.cdp.security + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. 
Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: CertificateId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MixedContentType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SecurityState + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CertificateSecurityState + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SafetyTipStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SafetyTipInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: VisibleSecurityState + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SecurityStateExplanation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InsecureContentStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CertificateErrorAction + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: handle_certificate_error + +.. autofunction:: set_ignore_certificate_errors + +.. autofunction:: set_override_certificate_errors + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: CertificateError + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: VisibleSecurityStateChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SecurityStateChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/service_worker.rst.txt b/docs/_build/html/_sources/nodriver/cdp/service_worker.rst.txt new file mode 100644 index 0000000..236eaf3 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/service_worker.rst.txt @@ -0,0 +1,108 @@ +ServiceWorker +============= + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.service_worker + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: RegistrationID + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ServiceWorkerRegistration + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ServiceWorkerVersionRunningStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ServiceWorkerVersionStatus + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: ServiceWorkerVersion + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ServiceWorkerErrorMessage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: deliver_push_message + +.. autofunction:: disable + +.. autofunction:: dispatch_periodic_sync_event + +.. autofunction:: dispatch_sync_event + +.. autofunction:: enable + +.. autofunction:: inspect_worker + +.. autofunction:: set_force_update_on_page_load + +.. autofunction:: skip_waiting + +.. autofunction:: start_worker + +.. autofunction:: stop_all_workers + +.. autofunction:: stop_worker + +.. autofunction:: unregister + +.. autofunction:: update_registration + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: WorkerErrorReported + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WorkerRegistrationUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: WorkerVersionUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/storage.rst.txt b/docs/_build/html/_sources/nodriver/cdp/storage.rst.txt new file mode 100644 index 0000000..6da7f90 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/storage.rst.txt @@ -0,0 +1,367 @@ +Storage +======= + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.storage + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: SerializedStorageKey + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StorageType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: UsageForType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TrustTokens + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterestGroupAuctionId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterestGroupAccessType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterestGroupAuctionEventType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterestGroupAuctionFetchType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedStorageAccessType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedStorageEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedStorageMetadata + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: SharedStorageReportingMetadata + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedStorageUrlWithMetadata + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedStorageAccessParams + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StorageBucketsDurability + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StorageBucket + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StorageBucketInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingSourceType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: UnsignedInt64AsBase10 + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: UnsignedInt128AsBase16 + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SignedInt64AsBase10 + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingFilterDataEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingFilterConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingFilterPair + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingAggregationKeysEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingEventReportWindows + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingTriggerSpec + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingTriggerDataMatching + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingSourceRegistration + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingSourceRegistrationResult + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingSourceRegistrationTimeConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingAggregatableValueDictEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingAggregatableValueEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingEventTriggerData + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingAggregatableTriggerData + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingAggregatableDedupKey + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingTriggerRegistration + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingEventLevelResult + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingAggregatableResult + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: RelatedWebsiteSet + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. 
The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: clear_cookies + +.. autofunction:: clear_data_for_origin + +.. autofunction:: clear_data_for_storage_key + +.. autofunction:: clear_shared_storage_entries + +.. autofunction:: clear_trust_tokens + +.. autofunction:: delete_shared_storage_entry + +.. autofunction:: delete_storage_bucket + +.. autofunction:: get_cookies + +.. autofunction:: get_interest_group_details + +.. autofunction:: get_related_website_sets + +.. autofunction:: get_shared_storage_entries + +.. autofunction:: get_shared_storage_metadata + +.. autofunction:: get_storage_key_for_frame + +.. autofunction:: get_trust_tokens + +.. autofunction:: get_usage_and_quota + +.. autofunction:: override_quota_for_origin + +.. autofunction:: reset_shared_storage_budget + +.. autofunction:: run_bounce_tracking_mitigations + +.. autofunction:: send_pending_attribution_reports + +.. autofunction:: set_attribution_reporting_local_testing_mode + +.. autofunction:: set_attribution_reporting_tracking + +.. autofunction:: set_cookies + +.. autofunction:: set_interest_group_auction_tracking + +.. autofunction:: set_interest_group_tracking + +.. autofunction:: set_shared_storage_entry + +.. autofunction:: set_shared_storage_tracking + +.. autofunction:: set_storage_bucket_tracking + +.. autofunction:: track_cache_storage_for_origin + +.. autofunction:: track_cache_storage_for_storage_key + +.. autofunction:: track_indexed_db_for_origin + +.. autofunction:: track_indexed_db_for_storage_key + +.. autofunction:: untrack_cache_storage_for_origin + +.. autofunction:: untrack_cache_storage_for_storage_key + +.. autofunction:: untrack_indexed_db_for_origin + +.. autofunction:: untrack_indexed_db_for_storage_key + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: CacheStorageContentUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CacheStorageListUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: IndexedDBContentUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: IndexedDBListUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterestGroupAccessed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterestGroupAuctionEventOccurred + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: InterestGroupAuctionNetworkRequestCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SharedStorageAccessed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StorageBucketCreatedOrUpdated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StorageBucketDeleted + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AttributionReportingSourceRegistered + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: AttributionReportingTriggerRegistered + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/system_info.rst.txt b/docs/_build/html/_sources/nodriver/cdp/system_info.rst.txt new file mode 100644 index 0000000..144b1d7 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/system_info.rst.txt @@ -0,0 +1,88 @@ +SystemInfo +========== + +The SystemInfo domain defines methods and events for querying low-level system information. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.system_info + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: GPUDevice + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Size + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: VideoDecodeAcceleratorCapability + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: VideoEncodeAcceleratorCapability + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SubsamplingFormat + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ImageType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ImageDecodeAcceleratorCapability + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: GPUInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ProcessInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: get_feature_state + +.. autofunction:: get_info + +.. autofunction:: get_process_info + +Events +------ + +*There are no events in this module.* diff --git a/docs/_build/html/_sources/nodriver/cdp/target.rst.txt b/docs/_build/html/_sources/nodriver/cdp/target.rst.txt new file mode 100644 index 0000000..1d62d02 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/target.rst.txt @@ -0,0 +1,136 @@ +Target +====== + +Supports additional targets discovery and allows to attach to them. + +.. module:: nodriver.cdp.target + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: TargetID + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: SessionID + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TargetInfo + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: FilterEntry + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TargetFilter + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: RemoteLocation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: activate_target + +.. autofunction:: attach_to_browser_target + +.. autofunction:: attach_to_target + +.. autofunction:: auto_attach_related + +.. autofunction:: close_target + +.. autofunction:: create_browser_context + +.. autofunction:: create_target + +.. autofunction:: detach_from_target + +.. autofunction:: dispose_browser_context + +.. autofunction:: expose_dev_tools_protocol + +.. autofunction:: get_browser_contexts + +.. autofunction:: get_target_info + +.. autofunction:: get_targets + +.. autofunction:: send_message_to_target + +.. autofunction:: set_auto_attach + +.. autofunction:: set_discover_targets + +.. autofunction:: set_remote_locations + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: AttachedToTarget + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DetachedFromTarget + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ReceivedMessageFromTarget + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TargetCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TargetDestroyed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TargetCrashed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TargetInfoChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/tethering.rst.txt b/docs/_build/html/_sources/nodriver/cdp/tethering.rst.txt new file mode 100644 index 0000000..0950dba --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/tethering.rst.txt @@ -0,0 +1,45 @@ +Tethering +========= + +The Tethering domain defines methods and events for browser port binding. + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.tethering + +* Types_ +* Commands_ +* Events_ + +Types +----- + +*There are no types in this module.* + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: bind + +.. autofunction:: unbind + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. 
autoclass:: Accepted + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/tracing.rst.txt b/docs/_build/html/_sources/nodriver/cdp/tracing.rst.txt new file mode 100644 index 0000000..bf6e9d9 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/tracing.rst.txt @@ -0,0 +1,90 @@ +Tracing +======= + +.. module:: nodriver.cdp.tracing + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: MemoryDumpConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TraceConfig + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StreamFormat + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: StreamCompression + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: MemoryDumpLevelOfDetail + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TracingBackend + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: end + +.. autofunction:: get_categories + +.. autofunction:: record_clock_sync_marker + +.. autofunction:: request_memory_dump + +.. autofunction:: start + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: BufferUsage + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: DataCollected + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: TracingComplete + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/web_audio.rst.txt b/docs/_build/html/_sources/nodriver/cdp/web_audio.rst.txt new file mode 100644 index 0000000..5a164d3 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/web_audio.rst.txt @@ -0,0 +1,176 @@ +WebAudio +======== + +This domain allows inspection of Web Audio API. +https://webaudio.github.io/web-audio-api/ + +*This CDP domain is experimental.* + +.. module:: nodriver.cdp.web_audio + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: GraphObjectId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContextType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContextState + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NodeType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. 
autoclass:: ChannelCountMode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ChannelInterpretation + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ParamType + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AutomationRate + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContextRealtimeData + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: BaseAudioContext + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioListener + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioNode + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioParam + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_realtime_data + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: ContextCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContextWillBeDestroyed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: ContextChanged + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioListenerCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioListenerWillBeDestroyed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioNodeCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioNodeWillBeDestroyed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioParamCreated + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AudioParamWillBeDestroyed + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NodesConnected + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NodesDisconnected + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NodeParamConnected + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: NodeParamDisconnected + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/cdp/web_authn.rst.txt b/docs/_build/html/_sources/nodriver/cdp/web_authn.rst.txt new file mode 100644 index 0000000..b5df442 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/cdp/web_authn.rst.txt @@ -0,0 +1,106 @@ +WebAuthn +======== + +This domain allows configuring virtual authenticators to test the WebAuthn +API. + +*This CDP domain is experimental.* + +.. 
module:: nodriver.cdp.web_authn + +* Types_ +* Commands_ +* Events_ + +Types +----- + +Generally, you do not need to instantiate CDP types +yourself. Instead, the API creates objects for you as return +values from commands, and then you can use those objects as +arguments to other commands. + +.. autoclass:: AuthenticatorId + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AuthenticatorProtocol + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Ctap2Version + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: AuthenticatorTransport + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: VirtualAuthenticatorOptions + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: Credential + :members: + :undoc-members: + :exclude-members: from_json, to_json + +Commands +-------- + +Each command is a generator function. The return +type ``Generator[x, y, z]`` indicates that the generator +*yields* arguments of type ``x``, it must be resumed with +an argument of type ``y``, and it returns type ``z``. In +this library, types ``x`` and ``y`` are the same for all +commands, and ``z`` is the return type you should pay attention +to. For more information, see +:ref:`Getting Started: Commands `. + +.. autofunction:: add_credential + +.. autofunction:: add_virtual_authenticator + +.. autofunction:: clear_credentials + +.. autofunction:: disable + +.. autofunction:: enable + +.. autofunction:: get_credential + +.. autofunction:: get_credentials + +.. autofunction:: remove_credential + +.. autofunction:: remove_virtual_authenticator + +.. autofunction:: set_automatic_presence_simulation + +.. autofunction:: set_credential_properties + +.. autofunction:: set_response_override_bits + +.. autofunction:: set_user_verified + +Events +------ + +Generally, you do not need to instantiate CDP events +yourself. Instead, the API creates events for you and then +you use the event's attributes. + +.. autoclass:: CredentialAdded + :members: + :undoc-members: + :exclude-members: from_json, to_json + +.. autoclass:: CredentialAsserted + :members: + :undoc-members: + :exclude-members: from_json, to_json diff --git a/docs/_build/html/_sources/nodriver/classes/browser.rst.txt b/docs/_build/html/_sources/nodriver/classes/browser.rst.txt new file mode 100644 index 0000000..3b3cba1 --- /dev/null +++ b/docs/_build/html/_sources/nodriver/classes/browser.rst.txt @@ -0,0 +1,42 @@ +.. _browser: + +Browser class +--------------------- + +cookies +^^^^^^^^^^^^^^^^^^^^^^^ + +You can load and save all cookies from the browser. + + +.. code-block:: + + # save. when no filepath is given, it is saved in '.session.dat' + await browser.cookies.save() + + +.. code-block:: + + # load. when no filepath is given, it is loaded from '.session.dat' + await browser.cookies.load() + + +.. code-block:: + + # export for requests or other library + requests_style_cookies = await browser.cookies.get_all(requests_cookie_format=True) + + # use in requests: + session = requests.Session() + for cookie in requests_style_cookies: + session.cookies.set_cookie(cookie) + + +Browser class +^^^^^^^^^^^^^^^^^^^^^^^ +.. 
autoclass:: nodriver.Browser
+    :members:
+    :undoc-members:
+    :inherited-members:
+
+
diff --git a/docs/_build/html/_sources/nodriver/classes/element.rst.txt b/docs/_build/html/_sources/nodriver/classes/element.rst.txt
new file mode 100644
index 0000000..2d475c7
--- /dev/null
+++ b/docs/_build/html/_sources/nodriver/classes/element.rst.txt
@@ -0,0 +1,14 @@
+.. _element:
+
+Element class
+---------------------
+
+Some words about the Element class
+
+
+.. autoclass:: nodriver.Element
+    :members:
+    :undoc-members:
+    :inherited-members:
+
+
diff --git a/docs/_build/html/_sources/nodriver/classes/others_and_helpers.rst.txt b/docs/_build/html/_sources/nodriver/classes/others_and_helpers.rst.txt
new file mode 100644
index 0000000..3b1421c
--- /dev/null
+++ b/docs/_build/html/_sources/nodriver/classes/others_and_helpers.rst.txt
@@ -0,0 +1,38 @@
+================================
+Other classes and Helper classes
+================================
+
+
+Config class
+-----------------
+
+.. autoclass:: nodriver.Config
+    :members:
+    :undoc-members:
+    :inherited-members:
+
+
+
+ContraDict class
+-----------------
+
+Many components in this package are built using a
+base class of :any:`nodriver.core._contradict.ContraDict`.
+
+It's nothing more than a dictionary which has attribute access AND
+is JSON serializable.
+
+
+.. autoclass:: nodriver.core._contradict.ContraDict
+    :members:
+    :inherited-members:
+
+
+Helper functions
+---------------------
+
+.. automodule:: nodriver.core._contradict
+    :members:
+    :inherited-members:
+
+
diff --git a/docs/_build/html/_sources/nodriver/classes/tab.rst.txt b/docs/_build/html/_sources/nodriver/classes/tab.rst.txt
new file mode 100644
index 0000000..0903ae5
--- /dev/null
+++ b/docs/_build/html/_sources/nodriver/classes/tab.rst.txt
@@ -0,0 +1,13 @@
+.. _tab:
+
+Tab class
+-------------
+.. autoclass:: nodriver.Tab
+    :members:
+    :inherited-members:
+    :member-order: alphabetical
+    :undoc-members:
+    :exclude-members: __setattr__
+
+
+
diff --git a/docs/_build/html/_sources/nodriver/quickstart.rst.txt b/docs/_build/html/_sources/nodriver/quickstart.rst.txt
new file mode 100644
index 0000000..dfb14e7
--- /dev/null
+++ b/docs/_build/html/_sources/nodriver/quickstart.rst.txt
@@ -0,0 +1,262 @@
+
+Quickstart guide
+=================
+
+
+Installation
+--------------
+Since it's a part of undetected-chromedriver, installation goes via
+
+.. code-block::
+
+    # todo. use pip install nodriver instead
+    pip install undetected-chromedriver
+
+--------
+
+Or as a separate package via:
+
+.. code-block::
+
+    pip install nodriver
+
+
+.. _getting-started-commands:
+
+usage example
+===============
+
+The aim of this project (just like undetected-chromedriver, somewhere long ago)
+is to keep it short and simple, so you can quickly open an editor or interactive session,
+type or paste a few lines and off you go.
+
+.. code-block:: python
+
+
+    import nodriver as uc
+
+    async def main():
+
+        browser = await uc.start()
+        page = await browser.get('https://www.nowsecure.nl')
+
+        ... further code ...
+
+    if __name__ == '__main__':
+        # since asyncio.run never worked (for me)
+        uc.loop().run_until_complete(main())
+
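+
+Those few lines already cover the common case, but the CDP domain modules
+documented in this reference (``nodriver.cdp.page``, ``nodriver.cdp.network``
+and so on) plug into the same workflow: every CDP command is a generator
+function which you normally hand over to a tab rather than drive yourself.
+A minimal sketch of that pattern, assuming the tab exposes a ``send()`` helper
+for raw CDP commands (treat the exact call as an illustration rather than
+verified API; see the :ref:`Tab class <tab>` reference):
+
+.. code-block:: python
+
+    import nodriver as uc
+    from nodriver import cdp
+
+    async def main():
+        browser = await uc.start()
+        tab = await browser.get('https://www.nowsecure.nl')
+
+        # a cdp command such as network.get_cookies() is a generator function;
+        # sending it through the tab resolves it into the command's return value
+        cookies = await tab.send(cdp.network.get_cookies())
+        print(len(cookies), 'cookies in this browsing session')
+
+    if __name__ == '__main__':
+        uc.loop().run_until_complete(main())
+
+The same pattern should apply to any other command listed under the CDP domain
+pages.
+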
+
+More complete example
+============================
+
+.. code-block::
+
+    import nodriver
+
+    async def main():
+
+        browser = await nodriver.start()
+        page = await browser.get('https://www.nowsecure.nl')
+
+        await page.save_screenshot()
+        await page.get_content()
+        await page.scroll_down(150)
+        elems = await page.select_all('*[src]')
+
+        for elem in elems:
+            await elem.flash()
+
+        page2 = await browser.get('https://twitter.com', new_tab=True)
+        page3 = await browser.get('https://github.com/ultrafunkamsterdam/nodriver', new_window=True)
+
+        for p in (page, page2, page3):
+            await p.bring_to_front()
+            await p.scroll_down(200)
+            await p  # wait for events to be processed
+            await p.reload()
+            if p != page3:
+                await p.close()
+
+    if __name__ == '__main__':
+
+        # since asyncio.run never worked (for me)
+        nodriver.loop().run_until_complete(main())
+
+
+Custom starting options
+============================
+I'll leave out the async boilerplate here
+
+.. code-block::
+
+    from nodriver import *
+
+    browser = await start(
+        headless=False,
+        user_data_dir="/path/to/existing/profile",  # by specifying it, it won't be automatically cleaned up when finished
+        browser_executable_path="/path/to/some/other/browser",
+        browser_args=['--some-browser-arg=true', '--some-other-option'],
+        lang="en-US"  # this could set iso-language-code in navigator, not recommended to change
+    )
+    tab = await browser.get('https://somewebsite.com')
+
+
+
+Alternative custom options
+============================
+I'll leave out the async boilerplate here
+
+.. code-block::
+
+    from nodriver import *
+
+    config = Config()
+    config.headless = False
+    config.user_data_dir = "/path/to/existing/profile"  # by specifying it, it won't be automatically cleaned up when finished
+    config.browser_executable_path = "/path/to/some/other/browser"
+    config.browser_args = ['--some-browser-arg=true', '--some-other-option']
+    config.lang = "en-US"  # this could set iso-language-code in navigator, not recommended to change
+
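+
+The block above builds a :any:`nodriver.Config` but stops short of showing how
+it is used. Presumably it is handed to ``start()``; a minimal sketch, with the
+async boilerplate again left out (the ``config=`` keyword argument is an
+assumption here, not something this page documents):
+
+.. code-block::
+
+    from nodriver import start, Config
+
+    config = Config()
+    config.headless = False
+
+    # pass the prepared config object instead of separate keyword arguments
+    browser = await start(config=config)
+    tab = await browser.get('https://somewebsite.com')
+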
+ print('finding the "create account" button') + create_account = await tab.find("create account", best_match=True) + + print('"create account" => click') + await create_account.click() + + print("finding the email input field") + email = await tab.select("input[type=email]") + + # sometimes, email field is not shown, because phone is being asked instead + # when this occurs, find the small text which says "use email instead" + if not email: + use_mail_instead = await tab.find("use email instead") + # and click it + await use_mail_instead.click() + + # now find the email field again + email = await tab.select("input[type=email]") + + randstr = lambda k: "".join(random.choices(string.ascii_letters, k=k)) + + # send keys to email field + print('filling in the "email" input field') + await email.send_keys("".join([randstr(8), "@", randstr(8), ".com"])) + + # find the name input field + print("finding the name input field") + name = await tab.select("input[type=text]") + + # again, send random text + print('filling in the "name" input field') + await name.send_keys(randstr(8)) + + # since there are 3 select fields on the tab, we can use unpacking + # to assign each field + print('finding the "month" , "day" and "year" fields in 1 go') + sel_month, sel_day, sel_year = await tab.select_all("select") + + # await sel_month.focus() + print('filling in the "month" input field') + await sel_month.send_keys(months[random.randint(0, 11)].title()) + + # await sel_day.focus() + # i don't want to bother with month-lengths and leap years + print('filling in the "day" input field') + await sel_day.send_keys(str(random.randint(0, 28))) + + # await sel_year.focus() + # i don't want to bother with age restrictions + print('filling in the "year" input field') + await sel_year.send_keys(str(random.randint(1980, 2005))) + + await tab + + # let's handle the cookie nag as well + cookie_bar_accept = await tab.find("accept all", best_match=True) + if cookie_bar_accept: + await cookie_bar_accept.click() + + await tab.sleep(1) + + next_btn = await tab.find(text="next", best_match=True) + # for btn in reversed(next_btns): + await next_btn.mouse_click() + + print("sleeping 2 seconds") + await tab.sleep(2) # visually see what part we're actually in + + print('finding "next" button') + next_btn = await tab.find(text="next", best_match=True) + print('clicking "next" button') + await next_btn.mouse_click() + + # just wait for some button, before we continue + await tab.select("[role=button]") + + print('finding "sign up" button') + sign_up_btn = await tab.find("Sign up", best_match=True) + # we need the second one + print('clicking "sign up" button') + await sign_up_btn.click() + + print('the rest of the "implementation" is out of scope') + # further implementation outside of scope + await tab.sleep(10) + driver.stop() + + # verification code per mail + + + if __name__ == "__main__": + # since asyncio.run never worked (for me) + # i use + uc.loop().run_until_complete(main()) + + + + + + diff --git a/docs/_build/html/_sources/readme.rst.txt b/docs/_build/html/_sources/readme.rst.txt new file mode 100644 index 0000000..394323e --- /dev/null +++ b/docs/_build/html/_sources/readme.rst.txt @@ -0,0 +1,271 @@ + + +################## +NODRIVER +################## + + +**This package provides next level webscraping and browser automation +using a relatively simple interface.** + +* **This is the official successor of the** `Undetected-Chromedriver `_ **python package.** +* **No more webdriver, no more selenium** + +Direct 
communication provides even better resistance against web application firewalls (WAFs), while +performance gets a massive boost. +This module is, contrary to undetected-chromedriver, fully asynchronous. + +What makes this package different from other known packages +is the optimization to stay undetected for most anti-bot solutions. + +Another focus point is usability and quick prototyping, so expect a lot to work `-as is-`, +with most method parameters having `best practice` defaults. +Using just 1 or 2 lines, it is up and running, providing a best-practice config +by default. + +While usability and convenience are important, it's also easy +to fully customize everything using the entire array of +`CDP `_ domains, methods and events available. + + +Some features +^^^^^^^^^^^^^^^^^^^^^^ + +* A blazing fast undetected chrome (-ish) automation library + +* No chromedriver binary or Selenium dependency + +* This equals a bizarre performance increase and fewer detections! + +* Up and running in 1 line of code* + +* uses a fresh profile on each run, cleans up on exit + +* save and load cookies to file to not repeat tedious login steps + +* smart element lookup, by selector or text, including iframe content. + this can also be used as a wait condition for an element to appear, since it will keep + retrying until found. + single element lookup by text using tab.find() accepts a best_match flag, which will not + naively return the first match, but will match candidates by closest matching text length. + +* descriptive __repr__ for elements, which represents the element as html + +* utility function to convert a running undetected_chromedriver.Chrome instance + to a nodriver.Browser instance and continue from there + +* packed with helpers and utility methods for the most used and important operations + +.. + * ```elem.text``` + * ```elem.text_all``` + + * ```elem.parent.parent.parent.attrs``` + * ```anchor_elem.href and anchor_elem['href']``` + * ```anchor_elem.href = 'someotherthing'; await anchor_elem.save()``` + * ```elem.children[-1].children[0].children[4].parent.parent``` + + * ```await html5video_element.record_video()``` + * ```await html5video_element('pause')``` + * ```await html5video_element.apply('''(el) => el.currentTime = 0''')``` + * ```tab = await browser.get(url, new_tab=True)``` + * ```tab_win = await browser.get(url, new_window=True)``` + * ```first = await tab.find('search text')``` + * ```best = await tab.find('search text', best_match=True)``` + * ```all_results = await tab.find_all('search text')``` + * ```first_submit_button = await tab.select(selector='button[type=submit]')``` + * ```inputs_in_form = await tab.select_all('form input')``` + + + +Installation +============= +Since it's part of undetected-chromedriver, installation goes via + +.. code-block:: + + pip install undetected-chromedriver + +-------- + +In case you don't want undetected-chromedriver, this package can be installed +using + +.. code-block:: + + pip install nodriver + + +.. _getting-started-commands: + +usage example +=============== + +The aim of this project (just like undetected-chromedriver, somewhere long ago) +is to keep it short and simple, so you can quickly open an editor or interactive session, +type or paste a few lines and off you go. + +..
code-block:: python + + import asyncio + import nodriver as uc + + async def main(): + browser = await uc.start() + page = await browser.get('https://www.nowsecure.nl') + + await page.save_screenshot() + await page.get_content() + await page.scroll_down(150) + elems = await page.select_all('*[src]') + for elem in elems: + await elem.flash() + + page2 = await browser.get('https://twitter.com', new_tab=True) + page3 = await browser.get('https://github.com/ultrafunkamsterdam/nodriver', new_window=True) + + for p in (page, page2, page3): + await p.bring_to_front() + await p.scroll_down(200) + await p # wait for events to be processed + await p.reload() + if p != page3: + await p.close() + + + if __name__ == '__main__': + + # since asyncio.run never worked (for me) + uc.loop().run_until_complete(main()) + + +A more concrete example, which can be found in the ./example/ folder, +shows a script to create a twitter account + +.. code-block:: python + + import random + import string + import logging + + logging.basicConfig(level=30) + + import nodriver as uc + + months = [ + "january", + "february", + "march", + "april", + "may", + "june", + "july", + "august", + "september", + "october", + "november", + "december", + ] + + + async def main(): + driver = await uc.start() + + tab = await driver.get("https://twitter.com") + + # wait for text to appear instead of a static number of seconds to wait + # this does not always work as expected, due to speed. + print('finding the "create account" button') + create_account = await tab.find("create account", best_match=True) + + print('"create account" => click') + await create_account.click() + + print("finding the email input field") + email = await tab.select("input[type=email]") + + # sometimes, email field is not shown, because phone is being asked instead + # when this occurs, find the small text which says "use email instead" + if not email: + use_mail_instead = await tab.find("use email instead") + # and click it + await use_mail_instead.click() + + # now find the email field again + email = await tab.select("input[type=email]") + + randstr = lambda k: "".join(random.choices(string.ascii_letters, k=k)) + + # send keys to email field + print('filling in the "email" input field') + await email.send_keys("".join([randstr(8), "@", randstr(8), ".com"])) + + # find the name input field + print("finding the name input field") + name = await tab.select("input[type=text]") + + # again, send random text + print('filling in the "name" input field') + await name.send_keys(randstr(8)) + + # since there are 3 select fields on the tab, we can use unpacking + # to assign each field + print('finding the "month" , "day" and "year" fields in 1 go') + sel_month, sel_day, sel_year = await tab.select_all("select") + + # await sel_month.focus() + print('filling in the "month" input field') + await sel_month.send_keys(months[random.randint(0, 11)].title()) + + # await sel_day.focus() + # i don't want to bother with month-lengths and leap years + print('filling in the "day" input field') + await sel_day.send_keys(str(random.randint(0, 28))) + + # await sel_year.focus() + # i don't want to bother with age restrictions + print('filling in the "year" input field') + await sel_year.send_keys(str(random.randint(1980, 2005))) + + await tab + + # let's handle the cookie nag as well + cookie_bar_accept = await tab.find("accept all", best_match=True) + if cookie_bar_accept: + await cookie_bar_accept.click() + + await tab.sleep(1) + + next_btn = await tab.find(text="next", 
best_match=True) + # for btn in reversed(next_btns): + await next_btn.mouse_click() + + print("sleeping 2 seconds") + await tab.sleep(2) # visually see what part we're actually in + + print('finding "next" button') + next_btn = await tab.find(text="next", best_match=True) + print('clicking "next" button') + await next_btn.mouse_click() + + # just wait for some button, before we continue + await tab.select("[role=button]") + + print('finding "sign up" button') + sign_up_btn = await tab.find("Sign up", best_match=True) + # we need the second one + print('clicking "sign up" button') + await sign_up_btn.click() + + print('the rest of the "implementation" is out of scope') + # further implementation outside of scope + await tab.sleep(10) + driver.stop() + + # verification code per mail + + + if __name__ == "__main__": + # since asyncio.run never worked (for me) + # i use + uc.loop().run_until_complete(main()) diff --git a/docs/_build/html/_sources/style.rst.txt b/docs/_build/html/_sources/style.rst.txt new file mode 100644 index 0000000..89c92dc --- /dev/null +++ b/docs/_build/html/_sources/style.rst.txt @@ -0,0 +1,39 @@ + +.. + * ``#`` with overline, for parts + * ``*`` with overline, for chapters + * ``=``, for sections + * ``-``, for subsections + * ``^``, for subsubsections + * ``"``, for paragraphs + +################## +TITLE +################## +title something + +SECTION +================== +section something + +SUBSECTION +^^^^^^^^^^^^^^^^^^^^ +subsection something + +PARAGRAPH +""""""""""""""""""""""""" +paragraph something + + +TABLES +^^^^^^^^^^^^^^^^ +===== ===== ====== + Inputs Output +------------ ------ + A B A or B +===== ===== ====== +False False False +True False True +False True True +True True True +===== ===== ====== \ No newline at end of file diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css new file mode 100644 index 0000000..30fee9d --- /dev/null +++ b/docs/_build/html/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/_build/html/_static/custom.css b/docs/_build/html/_static/custom.css new file mode 100644 index 0000000..a316496 --- /dev/null +++ b/docs/_build/html/_static/custom.css @@ -0,0 +1,21 @@ +@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;800&display=swap'); + +html,body { + font-family: 'Inter', sans-serif; +} + +li.toctree-l1 { + list-style: '> '; + margin: 0.2rem 0; +} + +.toctree-wrapper.compound li.toctree-l1 { + margin: 1rem 0; +} +a { + text-decoration: none; + color: #82a8ff; +} + + +/**/ \ No newline at end of file diff --git a/docs/_build/html/_static/debug.css b/docs/_build/html/_static/debug.css new file mode 100644 index 0000000..74d4aec --- /dev/null +++ b/docs/_build/html/_static/debug.css @@ -0,0 +1,69 @@ +/* + This CSS file should be overridden by the theme authors. It's + meant for debugging and developing the skeleton that this theme provides. +*/ +body { + font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, + "Apple Color Emoji", "Segoe UI Emoji"; + background: lavender; +} +.sb-announcement { + background: rgb(131, 131, 131); +} +.sb-announcement__inner { + background: black; + color: white; +} +.sb-header { + background: lightskyblue; +} +.sb-header__inner { + background: royalblue; + color: white; +} +.sb-header-secondary { + background: lightcyan; +} +.sb-header-secondary__inner { + background: cornflowerblue; + color: white; +} +.sb-sidebar-primary { + background: lightgreen; +} +.sb-main { + background: blanchedalmond; +} +.sb-main__inner { + background: antiquewhite; +} +.sb-header-article { + background: lightsteelblue; +} +.sb-article-container { + background: snow; +} +.sb-article-main { + background: white; +} +.sb-footer-article { + background: lightpink; +} +.sb-sidebar-secondary { + background: lightgoldenrodyellow; +} +.sb-footer-content { + background: plum; +} +.sb-footer-content__inner { + background: palevioletred; +} +.sb-footer { + background: pink; +} +.sb-footer__inner { + background: salmon; +} +.sb-article { + background: white; +} diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js new file mode 100644 index 0000000..d06a71d --- /dev/null +++ b/docs/_build/html/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } 
+ } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/docs/_build/html/_static/documentation_options.js b/docs/_build/html/_static/documentation_options.js new file mode 100644 index 0000000..7e4c114 --- /dev/null +++ b/docs/_build/html/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png new file mode 100644 index 0000000000000000000000000000000000000000..a858a410e4faa62ce324d814e4b816fff83a6fb3 GIT binary patch literal 286 zcmV+(0pb3MP)s`hMrGg#P~ix$^RISR_I47Y|r1 z_CyJOe}D1){SET-^Amu_i71Lt6eYfZjRyw@I6OQAIXXHDfiX^GbOlHe=Ae4>0m)d(f|Me07*qoM6N<$f}vM^LjV8( literal 0 HcmV?d00001 diff --git a/docs/_build/html/_static/language_data.js b/docs/_build/html/_static/language_data.js new file mode 100644 index 0000000..250f566 --- /dev/null +++ b/docs/_build/html/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, is available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/docs/_build/html/_static/minus.png b/docs/_build/html/_static/minus.png new file mode 100644 index 0000000000000000000000000000000000000000..d96755fdaf8bb2214971e0db9c1fd3077d7c419d GIT binary patch literal 90 zcmeAS@N?(olHy`uVBq!ia0vp^+#t*WBp7;*Yy1LIik>cxAr*|t7R?Mi>2?kWtu=nj kDsEF_5m^0CR;1wuP-*O&G^0G}KYk!hp00i_>zopr08q^qX#fBK literal 0 HcmV?d00001 diff --git a/docs/_build/html/_static/plus.png b/docs/_build/html/_static/plus.png new file mode 100644 index 0000000000000000000000000000000000000000..7107cec93a979b9a5f64843235a16651d563ce2d GIT binary patch literal 90 zcmeAS@N?(olHy`uVBq!ia0vp^+#t*WBp7;*Yy1LIik>cxAr*|t7R?Mi>2?kWtu>-2 m3q%Vub%g%s<8sJhVPMczOq}xhg9DJoz~JfX=d#Wzp$Pyb1r*Kz literal 0 HcmV?d00001 diff --git a/docs/_build/html/_static/pygments.css b/docs/_build/html/_static/pygments.css new file mode 100644 index 0000000..e91a351 --- /dev/null +++ 
b/docs/_build/html/_static/pygments.css @@ -0,0 +1,249 @@ +.highlight pre { line-height: 125%; } +.highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #eeffcc; } +.highlight .c { color: #408090; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #007020; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #007020 } /* Comment.Preproc */ +.highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #333333 } /* Generic.Output */ +.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #007020 } /* Keyword.Pseudo */ +.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #902000 } /* Keyword.Type */ +.highlight .m { color: #208050 } /* Literal.Number */ +.highlight .s { color: #4070a0 } /* Literal.String */ +.highlight .na { color: #4070a0 } /* Name.Attribute */ +.highlight .nb { color: #007020 } /* Name.Builtin */ +.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ +.highlight .no { color: #60add5 } /* Name.Constant */ +.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #007020 } /* Name.Exception */ +.highlight .nf { color: #06287e } /* Name.Function */ +.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ +.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #bb60d5 } /* Name.Variable */ +.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #208050 } /* Literal.Number.Bin */ 
+.highlight .mf { color: #208050 } /* Literal.Number.Float */ +.highlight .mh { color: #208050 } /* Literal.Number.Hex */ +.highlight .mi { color: #208050 } /* Literal.Number.Integer */ +.highlight .mo { color: #208050 } /* Literal.Number.Oct */ +.highlight .sa { color: #4070a0 } /* Literal.String.Affix */ +.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ +.highlight .sc { color: #4070a0 } /* Literal.String.Char */ +.highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ +.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4070a0 } /* Literal.String.Double */ +.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ +.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.highlight .sx { color: #c65d09 } /* Literal.String.Other */ +.highlight .sr { color: #235388 } /* Literal.String.Regex */ +.highlight .s1 { color: #4070a0 } /* Literal.String.Single */ +.highlight .ss { color: #517918 } /* Literal.String.Symbol */ +.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #06287e } /* Name.Function.Magic */ +.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ +.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ +.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ +.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ +.highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ +@media not print { +body[data-theme="dark"] .highlight pre { line-height: 125%; } +body[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight .hll { background-color: #49483e } +body[data-theme="dark"] .highlight { background: #272822; color: #f8f8f2 } +body[data-theme="dark"] .highlight .c { color: #959077 } /* Comment */ +body[data-theme="dark"] .highlight .err { color: #ed007e; background-color: #1e0010 } /* Error */ +body[data-theme="dark"] .highlight .esc { color: #f8f8f2 } /* Escape */ +body[data-theme="dark"] .highlight .g { color: #f8f8f2 } /* Generic */ +body[data-theme="dark"] .highlight .k { color: #66d9ef } /* Keyword */ +body[data-theme="dark"] .highlight .l { color: #ae81ff } /* Literal */ +body[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */ +body[data-theme="dark"] .highlight .o { color: #ff4689 } /* Operator */ +body[data-theme="dark"] .highlight .x { color: #f8f8f2 } /* Other */ +body[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */ +body[data-theme="dark"] .highlight .ch { color: #959077 } /* Comment.Hashbang */ +body[data-theme="dark"] .highlight .cm { color: #959077 } /* Comment.Multiline */ +body[data-theme="dark"] .highlight .cp { color: #959077 } /* Comment.Preproc */ +body[data-theme="dark"] .highlight .cpf { color: #959077 } /* Comment.PreprocFile */ +body[data-theme="dark"] .highlight .c1 { color: #959077 } /* Comment.Single */ +body[data-theme="dark"] 
.highlight .cs { color: #959077 } /* Comment.Special */ +body[data-theme="dark"] .highlight .gd { color: #ff4689 } /* Generic.Deleted */ +body[data-theme="dark"] .highlight .ge { color: #f8f8f2; font-style: italic } /* Generic.Emph */ +body[data-theme="dark"] .highlight .ges { color: #f8f8f2; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body[data-theme="dark"] .highlight .gr { color: #f8f8f2 } /* Generic.Error */ +body[data-theme="dark"] .highlight .gh { color: #f8f8f2 } /* Generic.Heading */ +body[data-theme="dark"] .highlight .gi { color: #a6e22e } /* Generic.Inserted */ +body[data-theme="dark"] .highlight .go { color: #66d9ef } /* Generic.Output */ +body[data-theme="dark"] .highlight .gp { color: #ff4689; font-weight: bold } /* Generic.Prompt */ +body[data-theme="dark"] .highlight .gs { color: #f8f8f2; font-weight: bold } /* Generic.Strong */ +body[data-theme="dark"] .highlight .gu { color: #959077 } /* Generic.Subheading */ +body[data-theme="dark"] .highlight .gt { color: #f8f8f2 } /* Generic.Traceback */ +body[data-theme="dark"] .highlight .kc { color: #66d9ef } /* Keyword.Constant */ +body[data-theme="dark"] .highlight .kd { color: #66d9ef } /* Keyword.Declaration */ +body[data-theme="dark"] .highlight .kn { color: #ff4689 } /* Keyword.Namespace */ +body[data-theme="dark"] .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */ +body[data-theme="dark"] .highlight .kr { color: #66d9ef } /* Keyword.Reserved */ +body[data-theme="dark"] .highlight .kt { color: #66d9ef } /* Keyword.Type */ +body[data-theme="dark"] .highlight .ld { color: #e6db74 } /* Literal.Date */ +body[data-theme="dark"] .highlight .m { color: #ae81ff } /* Literal.Number */ +body[data-theme="dark"] .highlight .s { color: #e6db74 } /* Literal.String */ +body[data-theme="dark"] .highlight .na { color: #a6e22e } /* Name.Attribute */ +body[data-theme="dark"] .highlight .nb { color: #f8f8f2 } /* Name.Builtin */ +body[data-theme="dark"] .highlight .nc { color: #a6e22e } /* Name.Class */ +body[data-theme="dark"] .highlight .no { color: #66d9ef } /* Name.Constant */ +body[data-theme="dark"] .highlight .nd { color: #a6e22e } /* Name.Decorator */ +body[data-theme="dark"] .highlight .ni { color: #f8f8f2 } /* Name.Entity */ +body[data-theme="dark"] .highlight .ne { color: #a6e22e } /* Name.Exception */ +body[data-theme="dark"] .highlight .nf { color: #a6e22e } /* Name.Function */ +body[data-theme="dark"] .highlight .nl { color: #f8f8f2 } /* Name.Label */ +body[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +body[data-theme="dark"] .highlight .nx { color: #a6e22e } /* Name.Other */ +body[data-theme="dark"] .highlight .py { color: #f8f8f2 } /* Name.Property */ +body[data-theme="dark"] .highlight .nt { color: #ff4689 } /* Name.Tag */ +body[data-theme="dark"] .highlight .nv { color: #f8f8f2 } /* Name.Variable */ +body[data-theme="dark"] .highlight .ow { color: #ff4689 } /* Operator.Word */ +body[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */ +body[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +body[data-theme="dark"] .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ +body[data-theme="dark"] .highlight .mf { color: #ae81ff } /* Literal.Number.Float */ +body[data-theme="dark"] .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ +body[data-theme="dark"] .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ +body[data-theme="dark"] .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ 
+body[data-theme="dark"] .highlight .sa { color: #e6db74 } /* Literal.String.Affix */ +body[data-theme="dark"] .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ +body[data-theme="dark"] .highlight .sc { color: #e6db74 } /* Literal.String.Char */ +body[data-theme="dark"] .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ +body[data-theme="dark"] .highlight .sd { color: #e6db74 } /* Literal.String.Doc */ +body[data-theme="dark"] .highlight .s2 { color: #e6db74 } /* Literal.String.Double */ +body[data-theme="dark"] .highlight .se { color: #ae81ff } /* Literal.String.Escape */ +body[data-theme="dark"] .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ +body[data-theme="dark"] .highlight .si { color: #e6db74 } /* Literal.String.Interpol */ +body[data-theme="dark"] .highlight .sx { color: #e6db74 } /* Literal.String.Other */ +body[data-theme="dark"] .highlight .sr { color: #e6db74 } /* Literal.String.Regex */ +body[data-theme="dark"] .highlight .s1 { color: #e6db74 } /* Literal.String.Single */ +body[data-theme="dark"] .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ +body[data-theme="dark"] .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ +body[data-theme="dark"] .highlight .fm { color: #a6e22e } /* Name.Function.Magic */ +body[data-theme="dark"] .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ +body[data-theme="dark"] .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ +body[data-theme="dark"] .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ +body[data-theme="dark"] .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ +body[data-theme="dark"] .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */ +@media (prefers-color-scheme: dark) { +body:not([data-theme="light"]) .highlight pre { line-height: 125%; } +body:not([data-theme="light"]) .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight .hll { background-color: #49483e } +body:not([data-theme="light"]) .highlight { background: #272822; color: #f8f8f2 } +body:not([data-theme="light"]) .highlight .c { color: #959077 } /* Comment */ +body:not([data-theme="light"]) .highlight .err { color: #ed007e; background-color: #1e0010 } /* Error */ +body:not([data-theme="light"]) .highlight .esc { color: #f8f8f2 } /* Escape */ +body:not([data-theme="light"]) .highlight .g { color: #f8f8f2 } /* Generic */ +body:not([data-theme="light"]) .highlight .k { color: #66d9ef } /* Keyword */ +body:not([data-theme="light"]) .highlight .l { color: #ae81ff } /* Literal */ +body:not([data-theme="light"]) .highlight .n { color: #f8f8f2 } /* Name */ +body:not([data-theme="light"]) .highlight .o { color: #ff4689 } /* Operator */ +body:not([data-theme="light"]) .highlight .x { color: #f8f8f2 } /* Other */ +body:not([data-theme="light"]) .highlight .p { color: #f8f8f2 } /* Punctuation */ +body:not([data-theme="light"]) .highlight .ch { color: #959077 } /* Comment.Hashbang */ +body:not([data-theme="light"]) .highlight 
.cm { color: #959077 } /* Comment.Multiline */ +body:not([data-theme="light"]) .highlight .cp { color: #959077 } /* Comment.Preproc */ +body:not([data-theme="light"]) .highlight .cpf { color: #959077 } /* Comment.PreprocFile */ +body:not([data-theme="light"]) .highlight .c1 { color: #959077 } /* Comment.Single */ +body:not([data-theme="light"]) .highlight .cs { color: #959077 } /* Comment.Special */ +body:not([data-theme="light"]) .highlight .gd { color: #ff4689 } /* Generic.Deleted */ +body:not([data-theme="light"]) .highlight .ge { color: #f8f8f2; font-style: italic } /* Generic.Emph */ +body:not([data-theme="light"]) .highlight .ges { color: #f8f8f2; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body:not([data-theme="light"]) .highlight .gr { color: #f8f8f2 } /* Generic.Error */ +body:not([data-theme="light"]) .highlight .gh { color: #f8f8f2 } /* Generic.Heading */ +body:not([data-theme="light"]) .highlight .gi { color: #a6e22e } /* Generic.Inserted */ +body:not([data-theme="light"]) .highlight .go { color: #66d9ef } /* Generic.Output */ +body:not([data-theme="light"]) .highlight .gp { color: #ff4689; font-weight: bold } /* Generic.Prompt */ +body:not([data-theme="light"]) .highlight .gs { color: #f8f8f2; font-weight: bold } /* Generic.Strong */ +body:not([data-theme="light"]) .highlight .gu { color: #959077 } /* Generic.Subheading */ +body:not([data-theme="light"]) .highlight .gt { color: #f8f8f2 } /* Generic.Traceback */ +body:not([data-theme="light"]) .highlight .kc { color: #66d9ef } /* Keyword.Constant */ +body:not([data-theme="light"]) .highlight .kd { color: #66d9ef } /* Keyword.Declaration */ +body:not([data-theme="light"]) .highlight .kn { color: #ff4689 } /* Keyword.Namespace */ +body:not([data-theme="light"]) .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */ +body:not([data-theme="light"]) .highlight .kr { color: #66d9ef } /* Keyword.Reserved */ +body:not([data-theme="light"]) .highlight .kt { color: #66d9ef } /* Keyword.Type */ +body:not([data-theme="light"]) .highlight .ld { color: #e6db74 } /* Literal.Date */ +body:not([data-theme="light"]) .highlight .m { color: #ae81ff } /* Literal.Number */ +body:not([data-theme="light"]) .highlight .s { color: #e6db74 } /* Literal.String */ +body:not([data-theme="light"]) .highlight .na { color: #a6e22e } /* Name.Attribute */ +body:not([data-theme="light"]) .highlight .nb { color: #f8f8f2 } /* Name.Builtin */ +body:not([data-theme="light"]) .highlight .nc { color: #a6e22e } /* Name.Class */ +body:not([data-theme="light"]) .highlight .no { color: #66d9ef } /* Name.Constant */ +body:not([data-theme="light"]) .highlight .nd { color: #a6e22e } /* Name.Decorator */ +body:not([data-theme="light"]) .highlight .ni { color: #f8f8f2 } /* Name.Entity */ +body:not([data-theme="light"]) .highlight .ne { color: #a6e22e } /* Name.Exception */ +body:not([data-theme="light"]) .highlight .nf { color: #a6e22e } /* Name.Function */ +body:not([data-theme="light"]) .highlight .nl { color: #f8f8f2 } /* Name.Label */ +body:not([data-theme="light"]) .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +body:not([data-theme="light"]) .highlight .nx { color: #a6e22e } /* Name.Other */ +body:not([data-theme="light"]) .highlight .py { color: #f8f8f2 } /* Name.Property */ +body:not([data-theme="light"]) .highlight .nt { color: #ff4689 } /* Name.Tag */ +body:not([data-theme="light"]) .highlight .nv { color: #f8f8f2 } /* Name.Variable */ +body:not([data-theme="light"]) .highlight .ow { color: #ff4689 } /* Operator.Word */ 
+body:not([data-theme="light"]) .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */ +body:not([data-theme="light"]) .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +body:not([data-theme="light"]) .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ +body:not([data-theme="light"]) .highlight .mf { color: #ae81ff } /* Literal.Number.Float */ +body:not([data-theme="light"]) .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ +body:not([data-theme="light"]) .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ +body:not([data-theme="light"]) .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ +body:not([data-theme="light"]) .highlight .sa { color: #e6db74 } /* Literal.String.Affix */ +body:not([data-theme="light"]) .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ +body:not([data-theme="light"]) .highlight .sc { color: #e6db74 } /* Literal.String.Char */ +body:not([data-theme="light"]) .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ +body:not([data-theme="light"]) .highlight .sd { color: #e6db74 } /* Literal.String.Doc */ +body:not([data-theme="light"]) .highlight .s2 { color: #e6db74 } /* Literal.String.Double */ +body:not([data-theme="light"]) .highlight .se { color: #ae81ff } /* Literal.String.Escape */ +body:not([data-theme="light"]) .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ +body:not([data-theme="light"]) .highlight .si { color: #e6db74 } /* Literal.String.Interpol */ +body:not([data-theme="light"]) .highlight .sx { color: #e6db74 } /* Literal.String.Other */ +body:not([data-theme="light"]) .highlight .sr { color: #e6db74 } /* Literal.String.Regex */ +body:not([data-theme="light"]) .highlight .s1 { color: #e6db74 } /* Literal.String.Single */ +body:not([data-theme="light"]) .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ +body:not([data-theme="light"]) .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ +body:not([data-theme="light"]) .highlight .fm { color: #a6e22e } /* Name.Function.Magic */ +body:not([data-theme="light"]) .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ +body:not([data-theme="light"]) .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ +body:not([data-theme="light"]) .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ +body:not([data-theme="light"]) .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ +body:not([data-theme="light"]) .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */ +} +} \ No newline at end of file diff --git a/docs/_build/html/_static/scripts/furo-extensions.js b/docs/_build/html/_static/scripts/furo-extensions.js new file mode 100644 index 0000000..e69de29 diff --git a/docs/_build/html/_static/scripts/furo.js b/docs/_build/html/_static/scripts/furo.js new file mode 100644 index 0000000..32e7c05 --- /dev/null +++ b/docs/_build/html/_static/scripts/furo.js @@ -0,0 +1,3 @@ +/*! 
For license information please see furo.js.LICENSE.txt */ +(()=>{var t={212:function(t,e,n){var o,r;r=void 0!==n.g?n.g:"undefined"!=typeof window?window:this,o=function(){return function(t){"use strict";var e={navClass:"active",contentClass:"active",nested:!1,nestedClass:"active",offset:0,reflow:!1,events:!0},n=function(t,e,n){if(n.settings.events){var o=new CustomEvent(t,{bubbles:!0,cancelable:!0,detail:n});e.dispatchEvent(o)}},o=function(t){var e=0;if(t.offsetParent)for(;t;)e+=t.offsetTop,t=t.offsetParent;return e>=0?e:0},r=function(t){t&&t.sort((function(t,e){return o(t.content)=Math.max(document.body.scrollHeight,document.documentElement.scrollHeight,document.body.offsetHeight,document.documentElement.offsetHeight,document.body.clientHeight,document.documentElement.clientHeight)},l=function(t,e){var n=t[t.length-1];if(function(t,e){return!(!s()||!c(t.content,e,!0))}(n,e))return n;for(var o=t.length-1;o>=0;o--)if(c(t[o].content,e))return t[o]},a=function(t,e){if(e.nested&&t.parentNode){var n=t.parentNode.closest("li");n&&(n.classList.remove(e.nestedClass),a(n,e))}},i=function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.remove(e.navClass),t.content.classList.remove(e.contentClass),a(o,e),n("gumshoeDeactivate",o,{link:t.nav,content:t.content,settings:e}))}},u=function(t,e){if(e.nested){var n=t.parentNode.closest("li");n&&(n.classList.add(e.nestedClass),u(n,e))}};return function(o,c){var s,a,d,f,m,v={setup:function(){s=document.querySelectorAll(o),a=[],Array.prototype.forEach.call(s,(function(t){var e=document.getElementById(decodeURIComponent(t.hash.substr(1)));e&&a.push({nav:t,content:e})})),r(a)},detect:function(){var t=l(a,m);t?d&&t.content===d.content||(i(d,m),function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.add(e.navClass),t.content.classList.add(e.contentClass),u(o,e),n("gumshoeActivate",o,{link:t.nav,content:t.content,settings:e}))}}(t,m),d=t):d&&(i(d,m),d=null)}},h=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame(v.detect)},g=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame((function(){r(a),v.detect()}))};return v.destroy=function(){d&&i(d,m),t.removeEventListener("scroll",h,!1),m.reflow&&t.removeEventListener("resize",g,!1),a=null,s=null,d=null,f=null,m=null},m=function(){var t={};return Array.prototype.forEach.call(arguments,(function(e){for(var n in e){if(!e.hasOwnProperty(n))return;t[n]=e[n]}})),t}(e,c||{}),v.setup(),v.detect(),t.addEventListener("scroll",h,!1),m.reflow&&t.addEventListener("resize",g,!1),v}}(r)}.apply(e,[]),void 0===o||(t.exports=o)}},e={};function n(o){var r=e[o];if(void 0!==r)return r.exports;var c=e[o]={exports:{}};return t[o].call(c.exports,c,c.exports,n),c.exports}n.n=t=>{var e=t&&t.__esModule?()=>t.default:()=>t;return n.d(e,{a:e}),e},n.d=(t,e)=>{for(var o in e)n.o(e,o)&&!n.o(t,o)&&Object.defineProperty(t,o,{enumerable:!0,get:e[o]})},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),n.o=(t,e)=>Object.prototype.hasOwnProperty.call(t,e),(()=>{"use strict";var t=n(212),e=n.n(t),o=null,r=null,c=window.pageYOffset||document.documentElement.scrollTop;const s=64;function l(){const t=localStorage.getItem("theme")||"auto";var e;"light"!==(e=window.matchMedia("(prefers-color-scheme: dark)").matches?"auto"===t?"light":"light"==t?"dark":"auto":"auto"===t?"dark":"dark"==t?"light":"auto")&&"dark"!==e&&"auto"!==e&&(console.error(`Got invalid theme mode: ${e}. 
Resetting to auto.`),e="auto"),document.body.dataset.theme=e,localStorage.setItem("theme",e),console.log(`Changed to ${e} mode.`)}function a(){!function(){const t=document.getElementsByClassName("theme-toggle");Array.from(t).forEach((t=>{t.addEventListener("click",l)}))}(),function(){let t=0,e=!1;window.addEventListener("scroll",(function(n){t=window.scrollY,e||(window.requestAnimationFrame((function(){var n;n=t,0==Math.floor(r.getBoundingClientRect().top)?r.classList.add("scrolled"):r.classList.remove("scrolled"),function(t){tc&&document.documentElement.classList.remove("show-back-to-top"),c=t}(n),function(t){null!==o&&(0==t?o.scrollTo(0,0):Math.ceil(t)>=Math.floor(document.documentElement.scrollHeight-window.innerHeight)?o.scrollTo(0,o.scrollHeight):document.querySelector(".scroll-current"))}(n),e=!1})),e=!0)})),window.scroll()}(),null!==o&&new(e())(".toc-tree a",{reflow:!0,recursive:!0,navClass:"scroll-current",offset:()=>{let t=parseFloat(getComputedStyle(document.documentElement).fontSize);return r.getBoundingClientRect().height+.5*t+1}})}document.addEventListener("DOMContentLoaded",(function(){document.body.parentNode.classList.remove("no-js"),r=document.querySelector("header"),o=document.querySelector(".toc-scroll"),a()}))})()})(); +//# sourceMappingURL=furo.js.map \ No newline at end of file diff --git a/docs/_build/html/_static/scripts/furo.js.LICENSE.txt b/docs/_build/html/_static/scripts/furo.js.LICENSE.txt new file mode 100644 index 0000000..1632189 --- /dev/null +++ b/docs/_build/html/_static/scripts/furo.js.LICENSE.txt @@ -0,0 +1,7 @@ +/*! + * gumshoejs v5.1.2 (patched by @pradyunsg) + * A simple, framework-agnostic scrollspy script. + * (c) 2019 Chris Ferdinandi + * MIT License + * http://github.com/cferdinandi/gumshoe + */ diff --git a/docs/_build/html/_static/scripts/furo.js.map b/docs/_build/html/_static/scripts/furo.js.map new file mode 100644 index 0000000..7b7ddb1 --- /dev/null +++ b/docs/_build/html/_static/scripts/furo.js.map @@ -0,0 +1 @@ 
+{"version":3,"file":"scripts/furo.js","mappings":";iCAAA,MAQWA,SAWS,IAAX,EAAAC,EACH,EAAAA,EACkB,oBAAXC,OACPA,OACAC,KAbS,EAAF,WACP,OAaJ,SAAUD,GACR,aAMA,IAAIE,EAAW,CAEbC,SAAU,SACVC,aAAc,SAGdC,QAAQ,EACRC,YAAa,SAGbC,OAAQ,EACRC,QAAQ,EAGRC,QAAQ,GA6BNC,EAAY,SAAUC,EAAMC,EAAMC,GAEpC,GAAKA,EAAOC,SAASL,OAArB,CAGA,IAAIM,EAAQ,IAAIC,YAAYL,EAAM,CAChCM,SAAS,EACTC,YAAY,EACZL,OAAQA,IAIVD,EAAKO,cAAcJ,EAVgB,CAWrC,EAOIK,EAAe,SAAUR,GAC3B,IAAIS,EAAW,EACf,GAAIT,EAAKU,aACP,KAAOV,GACLS,GAAYT,EAAKW,UACjBX,EAAOA,EAAKU,aAGhB,OAAOD,GAAY,EAAIA,EAAW,CACpC,EAMIG,EAAe,SAAUC,GACvBA,GACFA,EAASC,MAAK,SAAUC,EAAOC,GAG7B,OAFcR,EAAaO,EAAME,SACnBT,EAAaQ,EAAMC,UACF,EACxB,CACT,GAEJ,EAwCIC,EAAW,SAAUlB,EAAME,EAAUiB,GACvC,IAAIC,EAASpB,EAAKqB,wBACd1B,EAnCU,SAAUO,GAExB,MAA+B,mBAApBA,EAASP,OACX2B,WAAWpB,EAASP,UAItB2B,WAAWpB,EAASP,OAC7B,CA2Be4B,CAAUrB,GACvB,OAAIiB,EAEAK,SAASJ,EAAOD,OAAQ,KACvB/B,EAAOqC,aAAeC,SAASC,gBAAgBC,cAG7CJ,SAASJ,EAAOS,IAAK,KAAOlC,CACrC,EAMImC,EAAa,WACf,OACEC,KAAKC,KAAK5C,EAAOqC,YAAcrC,EAAO6C,cAnCjCF,KAAKG,IACVR,SAASS,KAAKC,aACdV,SAASC,gBAAgBS,aACzBV,SAASS,KAAKE,aACdX,SAASC,gBAAgBU,aACzBX,SAASS,KAAKP,aACdF,SAASC,gBAAgBC,aAkC7B,EAmBIU,EAAY,SAAUzB,EAAUX,GAClC,IAAIqC,EAAO1B,EAASA,EAAS2B,OAAS,GACtC,GAbgB,SAAUC,EAAMvC,GAChC,SAAI4B,MAAgBZ,EAASuB,EAAKxB,QAASf,GAAU,GAEvD,CAUMwC,CAAYH,EAAMrC,GAAW,OAAOqC,EACxC,IAAK,IAAII,EAAI9B,EAAS2B,OAAS,EAAGG,GAAK,EAAGA,IACxC,GAAIzB,EAASL,EAAS8B,GAAG1B,QAASf,GAAW,OAAOW,EAAS8B,EAEjE,EAOIC,EAAmB,SAAUC,EAAK3C,GAEpC,GAAKA,EAAST,QAAWoD,EAAIC,WAA7B,CAGA,IAAIC,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASR,aAG7BkD,EAAiBG,EAAI7C,GAV0B,CAWjD,EAOIiD,EAAa,SAAUC,EAAOlD,GAEhC,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASX,UAC7B6D,EAAMnC,QAAQgC,UAAUC,OAAOhD,EAASV,cAGxCoD,EAAiBG,EAAI7C,GAGrBJ,EAAU,oBAAqBiD,EAAI,CACjCM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,EAOIoD,EAAiB,SAAUT,EAAK3C,GAElC,GAAKA,EAAST,OAAd,CAGA,IAAIsD,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASR,aAG1B4D,EAAeP,EAAI7C,GAVS,CAW9B,EA6LA,OA1JkB,SAAUsD,EAAUC,GAKpC,IACIC,EAAU7C,EAAU8C,EAASC,EAAS1D,EADtC2D,EAAa,CAUjBA,MAAmB,WAEjBH,EAAWhC,SAASoC,iBAAiBN,GAGrC3C,EAAW,GAGXkD,MAAMC,UAAUC,QAAQC,KAAKR,GAAU,SAAUjB,GAE/C,IAAIxB,EAAUS,SAASyC,eACrBC,mBAAmB3B,EAAK4B,KAAKC,OAAO,KAEjCrD,GAGLJ,EAAS0D,KAAK,CACZ1B,IAAKJ,EACLxB,QAASA,GAEb,IAGAL,EAAaC,EACf,EAKAgD,OAAoB,WAElB,IAAIW,EAASlC,EAAUzB,EAAUX,GAG5BsE,EASDb,GAAWa,EAAOvD,UAAY0C,EAAQ1C,UAG1CkC,EAAWQ,EAASzD,GAzFT,SAAUkD,EAAOlD,GAE9B,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASX,UAC1B6D,EAAMnC,QAAQgC,UAAUM,IAAIrD,EAASV,cAGrC8D,EAAeP,EAAI7C,GAGnBJ,EAAU,kBAAmBiD,EAAI,CAC/BM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,CAqEIuE,CAASD,EAAQtE,GAGjByD,EAAUa,GAfJb,IACFR,EAAWQ,EAASzD,GACpByD,EAAU,KAchB,GAMIe,EAAgB,SAAUvE,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,sBAAsBf,EAAWgB,OACpD,EAMIC,EAAgB,SAAU3E,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,uBAAsB,WACrChE,EAAaC,GACbgD,EAAWgB,QACb,GACF,EAkDA,OA7CAhB,EAAWkB,QAAU,WAEfpB,GACFR,EAAWQ,EAASzD,GAItBd,EAAO4F,oBAAoB,SAAUN,GAAe,GAChDxE,EAASN,QACXR,EAAO4F,oBAAoB,SAAUF,GAAe,GAItDjE,EAAW,KACX6C,EAAW,KACXC,EAAU,KACVC,EAAU,KACV1D,EAAW,IACb,EAOEA,EA3XS,WACX,IAAI+E,EAAS,CAAC,EAOd,OANAlB,MAAMC,UAAUC,QAAQC,KAAKgB,WAAW,SAAUC,GAChD,IAAK,IAAIC,KAAOD,EAAK,CACnB,IAAKA,EAAIE,eAAeD,GAAM,OAC9BH,EAAOG,GAAOD,EAAIC,EACpB,CACF,IACOH,CACT,CAkXeK,CAAOhG,EAAUmE,GAAW,CAAC,GAGxCI,EAAW0B,QAGX1B,EAAWgB,SAGXzF,EAAOoG,iBAAiB,SAAUd,GAAe,GAC7CxE,EAASN,QACXR,EAAOoG,iBAAiB,SAAUV,GAAe,GAS9CjB,CACT,CAOF,CArcW4B,CAAQvG,EAChB,UAFM,SAEN,uBCXDwG,EAA2B,CAAC,EAGhC,SAASC,E
AAoBC,GAE5B,IAAIC,EAAeH,EAAyBE,GAC5C,QAAqBE,IAAjBD,EACH,OAAOA,EAAaE,QAGrB,IAAIC,EAASN,EAAyBE,GAAY,CAGjDG,QAAS,CAAC,GAOX,OAHAE,EAAoBL,GAAU1B,KAAK8B,EAAOD,QAASC,EAAQA,EAAOD,QAASJ,GAGpEK,EAAOD,OACf,CCrBAJ,EAAoBO,EAAKF,IACxB,IAAIG,EAASH,GAAUA,EAAOI,WAC7B,IAAOJ,EAAiB,QACxB,IAAM,EAEP,OADAL,EAAoBU,EAAEF,EAAQ,CAAEG,EAAGH,IAC5BA,CAAM,ECLdR,EAAoBU,EAAI,CAACN,EAASQ,KACjC,IAAI,IAAInB,KAAOmB,EACXZ,EAAoBa,EAAED,EAAYnB,KAASO,EAAoBa,EAAET,EAASX,IAC5EqB,OAAOC,eAAeX,EAASX,EAAK,CAAEuB,YAAY,EAAMC,IAAKL,EAAWnB,IAE1E,ECNDO,EAAoBxG,EAAI,WACvB,GAA0B,iBAAf0H,WAAyB,OAAOA,WAC3C,IACC,OAAOxH,MAAQ,IAAIyH,SAAS,cAAb,EAChB,CAAE,MAAOC,GACR,GAAsB,iBAAX3H,OAAqB,OAAOA,MACxC,CACA,CAPuB,GCAxBuG,EAAoBa,EAAI,CAACrB,EAAK6B,IAAUP,OAAOzC,UAAUqB,eAAenB,KAAKiB,EAAK6B,4CCK9EC,EAAY,KACZC,EAAS,KACTC,EAAgB/H,OAAO6C,aAAeP,SAASC,gBAAgByF,UACnE,MAAMC,EAAmB,GA2EzB,SAASC,IACP,MAAMC,EAAeC,aAAaC,QAAQ,UAAY,OAZxD,IAAkBC,EACH,WADGA,EAaItI,OAAOuI,WAAW,gCAAgCC,QAI/C,SAAjBL,EACO,QACgB,SAAhBA,EACA,OAEA,OAIU,SAAjBA,EACO,OACgB,QAAhBA,EACA,QAEA,SA9BoB,SAATG,GAA4B,SAATA,IACzCG,QAAQC,MAAM,2BAA2BJ,yBACzCA,EAAO,QAGThG,SAASS,KAAK4F,QAAQC,MAAQN,EAC9BF,aAAaS,QAAQ,QAASP,GAC9BG,QAAQK,IAAI,cAAcR,UA0B5B,CAkDA,SAASnC,KART,WAEE,MAAM4C,EAAUzG,SAAS0G,uBAAuB,gBAChDrE,MAAMsE,KAAKF,GAASlE,SAASqE,IAC3BA,EAAI9C,iBAAiB,QAAS8B,EAAe,GAEjD,CAGEiB,GA9CF,WAEE,IAAIC,EAA6B,EAC7BC,GAAU,EAEdrJ,OAAOoG,iBAAiB,UAAU,SAAUuB,GAC1CyB,EAA6BpJ,OAAOsJ,QAE/BD,IACHrJ,OAAOwF,uBAAsB,WAzDnC,IAAuB+D,IA0DDH,EA9GkC,GAAlDzG,KAAK6G,MAAM1B,EAAO7F,wBAAwBQ,KAC5CqF,EAAOjE,UAAUM,IAAI,YAErB2D,EAAOjE,UAAUC,OAAO,YAI5B,SAAmCyF,GAC7BA,EAAYtB,EACd3F,SAASC,gBAAgBsB,UAAUC,OAAO,oBAEtCyF,EAAYxB,EACdzF,SAASC,gBAAgBsB,UAAUM,IAAI,oBAC9BoF,EAAYxB,GACrBzF,SAASC,gBAAgBsB,UAAUC,OAAO,oBAG9CiE,EAAgBwB,CAClB,CAoCEE,CAA0BF,GAlC5B,SAA6BA,GACT,OAAd1B,IAKa,GAAb0B,EACF1B,EAAU6B,SAAS,EAAG,GAGtB/G,KAAKC,KAAK2G,IACV5G,KAAK6G,MAAMlH,SAASC,gBAAgBS,aAAehD,OAAOqC,aAE1DwF,EAAU6B,SAAS,EAAG7B,EAAU7E,cAGhBV,SAASqH,cAAc,mBAc3C,CAKEC,CAAoBL,GAwDdF,GAAU,CACZ,IAEAA,GAAU,EAEd,IACArJ,OAAO6J,QACT,CA6BEC,GA1BkB,OAAdjC,GAKJ,IAAI,IAAJ,CAAY,cAAe,CACzBrH,QAAQ,EACRuJ,WAAW,EACX5J,SAAU,iBACVI,OAAQ,KACN,IAAIyJ,EAAM9H,WAAW+H,iBAAiB3H,SAASC,iBAAiB2H,UAChE,OAAOpC,EAAO7F,wBAAwBkI,OAAS,GAAMH,EAAM,CAAC,GAiBlE,CAcA1H,SAAS8D,iBAAiB,oBAT1B,WACE9D,SAASS,KAAKW,WAAWG,UAAUC,OAAO,SAE1CgE,EAASxF,SAASqH,cAAc,UAChC9B,EAAYvF,SAASqH,cAAc,eAEnCxD,GACF","sources":["webpack:///./src/furo/assets/scripts/gumshoe-patched.js","webpack:///webpack/bootstrap","webpack:///webpack/runtime/compat get default export","webpack:///webpack/runtime/define property getters","webpack:///webpack/runtime/global","webpack:///webpack/runtime/hasOwnProperty shorthand","webpack:///./src/furo/assets/scripts/furo.js"],"sourcesContent":["/*!\n * gumshoejs v5.1.2 (patched by @pradyunsg)\n * A simple, framework-agnostic scrollspy script.\n * (c) 2019 Chris Ferdinandi\n * MIT License\n * http://github.com/cferdinandi/gumshoe\n */\n\n(function (root, factory) {\n if (typeof define === \"function\" && define.amd) {\n define([], function () {\n return factory(root);\n });\n } else if (typeof exports === \"object\") {\n module.exports = factory(root);\n } else {\n root.Gumshoe = factory(root);\n }\n})(\n typeof global !== \"undefined\"\n ? global\n : typeof window !== \"undefined\"\n ? 
window\n : this,\n function (window) {\n \"use strict\";\n\n //\n // Defaults\n //\n\n var defaults = {\n // Active classes\n navClass: \"active\",\n contentClass: \"active\",\n\n // Nested navigation\n nested: false,\n nestedClass: \"active\",\n\n // Offset & reflow\n offset: 0,\n reflow: false,\n\n // Event support\n events: true,\n };\n\n //\n // Methods\n //\n\n /**\n * Merge two or more objects together.\n * @param {Object} objects The objects to merge together\n * @returns {Object} Merged values of defaults and options\n */\n var extend = function () {\n var merged = {};\n Array.prototype.forEach.call(arguments, function (obj) {\n for (var key in obj) {\n if (!obj.hasOwnProperty(key)) return;\n merged[key] = obj[key];\n }\n });\n return merged;\n };\n\n /**\n * Emit a custom event\n * @param {String} type The event type\n * @param {Node} elem The element to attach the event to\n * @param {Object} detail Any details to pass along with the event\n */\n var emitEvent = function (type, elem, detail) {\n // Make sure events are enabled\n if (!detail.settings.events) return;\n\n // Create a new event\n var event = new CustomEvent(type, {\n bubbles: true,\n cancelable: true,\n detail: detail,\n });\n\n // Dispatch the event\n elem.dispatchEvent(event);\n };\n\n /**\n * Get an element's distance from the top of the Document.\n * @param {Node} elem The element\n * @return {Number} Distance from the top in pixels\n */\n var getOffsetTop = function (elem) {\n var location = 0;\n if (elem.offsetParent) {\n while (elem) {\n location += elem.offsetTop;\n elem = elem.offsetParent;\n }\n }\n return location >= 0 ? location : 0;\n };\n\n /**\n * Sort content from first to last in the DOM\n * @param {Array} contents The content areas\n */\n var sortContents = function (contents) {\n if (contents) {\n contents.sort(function (item1, item2) {\n var offset1 = getOffsetTop(item1.content);\n var offset2 = getOffsetTop(item2.content);\n if (offset1 < offset2) return -1;\n return 1;\n });\n }\n };\n\n /**\n * Get the offset to use for calculating position\n * @param {Object} settings The settings for this instantiation\n * @return {Float} The number of pixels to offset the calculations\n */\n var getOffset = function (settings) {\n // if the offset is a function run it\n if (typeof settings.offset === \"function\") {\n return parseFloat(settings.offset());\n }\n\n // Otherwise, return it as-is\n return parseFloat(settings.offset);\n };\n\n /**\n * Get the document element's height\n * @private\n * @returns {Number}\n */\n var getDocumentHeight = function () {\n return Math.max(\n document.body.scrollHeight,\n document.documentElement.scrollHeight,\n document.body.offsetHeight,\n document.documentElement.offsetHeight,\n document.body.clientHeight,\n document.documentElement.clientHeight,\n );\n };\n\n /**\n * Determine if an element is in view\n * @param {Node} elem The element\n * @param {Object} settings The settings for this instantiation\n * @param {Boolean} bottom If true, check if element is above bottom of viewport instead\n * @return {Boolean} Returns true if element is in the viewport\n */\n var isInView = function (elem, settings, bottom) {\n var bounds = elem.getBoundingClientRect();\n var offset = getOffset(settings);\n if (bottom) {\n return (\n parseInt(bounds.bottom, 10) <\n (window.innerHeight || document.documentElement.clientHeight)\n );\n }\n return parseInt(bounds.top, 10) <= offset;\n };\n\n /**\n * Check if at the bottom of the viewport\n * @return {Boolean} If true, page is at the bottom 
of the viewport\n */\n var isAtBottom = function () {\n if (\n Math.ceil(window.innerHeight + window.pageYOffset) >=\n getDocumentHeight()\n )\n return true;\n return false;\n };\n\n /**\n * Check if the last item should be used (even if not at the top of the page)\n * @param {Object} item The last item\n * @param {Object} settings The settings for this instantiation\n * @return {Boolean} If true, use the last item\n */\n var useLastItem = function (item, settings) {\n if (isAtBottom() && isInView(item.content, settings, true)) return true;\n return false;\n };\n\n /**\n * Get the active content\n * @param {Array} contents The content areas\n * @param {Object} settings The settings for this instantiation\n * @return {Object} The content area and matching navigation link\n */\n var getActive = function (contents, settings) {\n var last = contents[contents.length - 1];\n if (useLastItem(last, settings)) return last;\n for (var i = contents.length - 1; i >= 0; i--) {\n if (isInView(contents[i].content, settings)) return contents[i];\n }\n };\n\n /**\n * Deactivate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var deactivateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested || !nav.parentNode) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Remove the active class\n li.classList.remove(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n deactivateNested(li, settings);\n };\n\n /**\n * Deactivate a nav and content area\n * @param {Object} items The nav item and content to deactivate\n * @param {Object} settings The settings for this instantiation\n */\n var deactivate = function (items, settings) {\n // Make sure there are items to deactivate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Remove the active class from the nav and content\n li.classList.remove(settings.navClass);\n items.content.classList.remove(settings.contentClass);\n\n // Deactivate any parent navs in a nested navigation\n deactivateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeDeactivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Activate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var activateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Add the active class\n li.classList.add(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n activateNested(li, settings);\n };\n\n /**\n * Activate a nav and content area\n * @param {Object} items The nav item and content to activate\n * @param {Object} settings The settings for this instantiation\n */\n var activate = function (items, settings) {\n // Make sure there are items to activate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Add the active class to the nav and content\n li.classList.add(settings.navClass);\n items.content.classList.add(settings.contentClass);\n\n // Activate any parent navs in a nested 
navigation\n activateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeActivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Create the Constructor object\n * @param {String} selector The selector to use for navigation items\n * @param {Object} options User options and settings\n */\n var Constructor = function (selector, options) {\n //\n // Variables\n //\n\n var publicAPIs = {};\n var navItems, contents, current, timeout, settings;\n\n //\n // Methods\n //\n\n /**\n * Set variables from DOM elements\n */\n publicAPIs.setup = function () {\n // Get all nav items\n navItems = document.querySelectorAll(selector);\n\n // Create contents array\n contents = [];\n\n // Loop through each item, get it's matching content, and push to the array\n Array.prototype.forEach.call(navItems, function (item) {\n // Get the content for the nav item\n var content = document.getElementById(\n decodeURIComponent(item.hash.substr(1)),\n );\n if (!content) return;\n\n // Push to the contents array\n contents.push({\n nav: item,\n content: content,\n });\n });\n\n // Sort contents by the order they appear in the DOM\n sortContents(contents);\n };\n\n /**\n * Detect which content is currently active\n */\n publicAPIs.detect = function () {\n // Get the active content\n var active = getActive(contents, settings);\n\n // if there's no active content, deactivate and bail\n if (!active) {\n if (current) {\n deactivate(current, settings);\n current = null;\n }\n return;\n }\n\n // If the active content is the one currently active, do nothing\n if (current && active.content === current.content) return;\n\n // Deactivate the current content and activate the new content\n deactivate(current, settings);\n activate(active, settings);\n\n // Update the currently active content\n current = active;\n };\n\n /**\n * Detect the active content on scroll\n * Debounced for performance\n */\n var scrollHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(publicAPIs.detect);\n };\n\n /**\n * Update content sorting on resize\n * Debounced for performance\n */\n var resizeHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(function () {\n sortContents(contents);\n publicAPIs.detect();\n });\n };\n\n /**\n * Destroy the current instantiation\n */\n publicAPIs.destroy = function () {\n // Undo DOM changes\n if (current) {\n deactivate(current, settings);\n }\n\n // Remove event listeners\n window.removeEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.removeEventListener(\"resize\", resizeHandler, false);\n }\n\n // Reset variables\n contents = null;\n navItems = null;\n current = null;\n timeout = null;\n settings = null;\n };\n\n /**\n * Initialize the current instantiation\n */\n var init = function () {\n // Merge user options into defaults\n settings = extend(defaults, options || {});\n\n // Setup variables based on the current DOM\n publicAPIs.setup();\n\n // Find the currently active content\n publicAPIs.detect();\n\n // Setup event listeners\n window.addEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.addEventListener(\"resize\", resizeHandler, false);\n }\n };\n\n //\n // Initialize and return the public 
APIs\n //\n\n init();\n return publicAPIs;\n };\n\n //\n // Return the Constructor\n //\n\n return Constructor;\n },\n);\n","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\t// no module.id needed\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","import Gumshoe from \"./gumshoe-patched.js\";\n\n////////////////////////////////////////////////////////////////////////////////\n// Scroll Handling\n////////////////////////////////////////////////////////////////////////////////\nvar tocScroll = null;\nvar header = null;\nvar lastScrollTop = window.pageYOffset || document.documentElement.scrollTop;\nconst GO_TO_TOP_OFFSET = 64;\n\nfunction scrollHandlerForHeader() {\n if (Math.floor(header.getBoundingClientRect().top) == 0) {\n header.classList.add(\"scrolled\");\n } else {\n header.classList.remove(\"scrolled\");\n }\n}\n\nfunction scrollHandlerForBackToTop(positionY) {\n if (positionY < GO_TO_TOP_OFFSET) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n } else {\n if (positionY < lastScrollTop) {\n document.documentElement.classList.add(\"show-back-to-top\");\n } else if (positionY > lastScrollTop) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n }\n }\n lastScrollTop = positionY;\n}\n\nfunction scrollHandlerForTOC(positionY) {\n if (tocScroll === null) {\n return;\n }\n\n // top of page.\n if (positionY == 0) {\n tocScroll.scrollTo(0, 0);\n } else if (\n // bottom of page.\n Math.ceil(positionY) >=\n Math.floor(document.documentElement.scrollHeight - window.innerHeight)\n ) {\n tocScroll.scrollTo(0, tocScroll.scrollHeight);\n } else {\n // somewhere in the middle.\n const current = document.querySelector(\".scroll-current\");\n if (current == null) {\n return;\n }\n\n // https://github.com/pypa/pip/issues/9159 This breaks scroll behaviours.\n // // scroll the currently \"active\" heading in toc, into view.\n // const rect = current.getBoundingClientRect();\n // if (0 > rect.top) {\n // current.scrollIntoView(true); // the argument is \"alignTop\"\n // } else if 
(rect.bottom > window.innerHeight) {\n // current.scrollIntoView(false);\n // }\n }\n}\n\nfunction scrollHandler(positionY) {\n scrollHandlerForHeader();\n scrollHandlerForBackToTop(positionY);\n scrollHandlerForTOC(positionY);\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Theme Toggle\n////////////////////////////////////////////////////////////////////////////////\nfunction setTheme(mode) {\n if (mode !== \"light\" && mode !== \"dark\" && mode !== \"auto\") {\n console.error(`Got invalid theme mode: ${mode}. Resetting to auto.`);\n mode = \"auto\";\n }\n\n document.body.dataset.theme = mode;\n localStorage.setItem(\"theme\", mode);\n console.log(`Changed to ${mode} mode.`);\n}\n\nfunction cycleThemeOnce() {\n const currentTheme = localStorage.getItem(\"theme\") || \"auto\";\n const prefersDark = window.matchMedia(\"(prefers-color-scheme: dark)\").matches;\n\n if (prefersDark) {\n // Auto (dark) -> Light -> Dark\n if (currentTheme === \"auto\") {\n setTheme(\"light\");\n } else if (currentTheme == \"light\") {\n setTheme(\"dark\");\n } else {\n setTheme(\"auto\");\n }\n } else {\n // Auto (light) -> Dark -> Light\n if (currentTheme === \"auto\") {\n setTheme(\"dark\");\n } else if (currentTheme == \"dark\") {\n setTheme(\"light\");\n } else {\n setTheme(\"auto\");\n }\n }\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////////////////////////\nfunction setupScrollHandler() {\n // Taken from https://developer.mozilla.org/en-US/docs/Web/API/Document/scroll_event\n let last_known_scroll_position = 0;\n let ticking = false;\n\n window.addEventListener(\"scroll\", function (e) {\n last_known_scroll_position = window.scrollY;\n\n if (!ticking) {\n window.requestAnimationFrame(function () {\n scrollHandler(last_known_scroll_position);\n ticking = false;\n });\n\n ticking = true;\n }\n });\n window.scroll();\n}\n\nfunction setupScrollSpy() {\n if (tocScroll === null) {\n return;\n }\n\n // Scrollspy -- highlight table on contents, based on scroll\n new Gumshoe(\".toc-tree a\", {\n reflow: true,\n recursive: true,\n navClass: \"scroll-current\",\n offset: () => {\n let rem = parseFloat(getComputedStyle(document.documentElement).fontSize);\n return header.getBoundingClientRect().height + 0.5 * rem + 1;\n },\n });\n}\n\nfunction setupTheme() {\n // Attach event handlers for toggling themes\n const buttons = document.getElementsByClassName(\"theme-toggle\");\n Array.from(buttons).forEach((btn) => {\n btn.addEventListener(\"click\", cycleThemeOnce);\n });\n}\n\nfunction setup() {\n setupTheme();\n setupScrollHandler();\n setupScrollSpy();\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Main entrypoint\n////////////////////////////////////////////////////////////////////////////////\nfunction main() {\n document.body.parentNode.classList.remove(\"no-js\");\n\n header = document.querySelector(\"header\");\n tocScroll = document.querySelector(\".toc-scroll\");\n\n setup();\n}\n\ndocument.addEventListener(\"DOMContentLoaded\", 
main);\n"],"names":["root","g","window","this","defaults","navClass","contentClass","nested","nestedClass","offset","reflow","events","emitEvent","type","elem","detail","settings","event","CustomEvent","bubbles","cancelable","dispatchEvent","getOffsetTop","location","offsetParent","offsetTop","sortContents","contents","sort","item1","item2","content","isInView","bottom","bounds","getBoundingClientRect","parseFloat","getOffset","parseInt","innerHeight","document","documentElement","clientHeight","top","isAtBottom","Math","ceil","pageYOffset","max","body","scrollHeight","offsetHeight","getActive","last","length","item","useLastItem","i","deactivateNested","nav","parentNode","li","closest","classList","remove","deactivate","items","link","activateNested","add","selector","options","navItems","current","timeout","publicAPIs","querySelectorAll","Array","prototype","forEach","call","getElementById","decodeURIComponent","hash","substr","push","active","activate","scrollHandler","cancelAnimationFrame","requestAnimationFrame","detect","resizeHandler","destroy","removeEventListener","merged","arguments","obj","key","hasOwnProperty","extend","setup","addEventListener","factory","__webpack_module_cache__","__webpack_require__","moduleId","cachedModule","undefined","exports","module","__webpack_modules__","n","getter","__esModule","d","a","definition","o","Object","defineProperty","enumerable","get","globalThis","Function","e","prop","tocScroll","header","lastScrollTop","scrollTop","GO_TO_TOP_OFFSET","cycleThemeOnce","currentTheme","localStorage","getItem","mode","matchMedia","matches","console","error","dataset","theme","setItem","log","buttons","getElementsByClassName","from","btn","setupTheme","last_known_scroll_position","ticking","scrollY","positionY","floor","scrollHandlerForBackToTop","scrollTo","querySelector","scrollHandlerForTOC","scroll","setupScrollHandler","recursive","rem","getComputedStyle","fontSize","height"],"sourceRoot":""} \ No newline at end of file diff --git a/docs/_build/html/_static/searchtools.js b/docs/_build/html/_static/searchtools.js new file mode 100644 index 0000000..7918c3f --- /dev/null +++ b/docs/_build/html/_static/searchtools.js @@ -0,0 +1,574 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. 
+ * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && 
!terms[word]) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/docs/_build/html/_static/skeleton.css b/docs/_build/html/_static/skeleton.css new file mode 100644 index 0000000..467c878 --- /dev/null +++ b/docs/_build/html/_static/skeleton.css @@ -0,0 +1,296 @@ +/* Some sane resets. */ +html { + height: 100%; +} + +body { + margin: 0; + min-height: 100%; +} + +/* All the flexbox magic! 
*/ +body, +.sb-announcement, +.sb-content, +.sb-main, +.sb-container, +.sb-container__inner, +.sb-article-container, +.sb-footer-content, +.sb-header, +.sb-header-secondary, +.sb-footer { + display: flex; +} + +/* These order things vertically */ +body, +.sb-main, +.sb-article-container { + flex-direction: column; +} + +/* Put elements in the center */ +.sb-header, +.sb-header-secondary, +.sb-container, +.sb-content, +.sb-footer, +.sb-footer-content { + justify-content: center; +} +/* Put elements at the ends */ +.sb-article-container { + justify-content: space-between; +} + +/* These elements grow. */ +.sb-main, +.sb-content, +.sb-container, +article { + flex-grow: 1; +} + +/* Because padding making this wider is not fun */ +article { + box-sizing: border-box; +} + +/* The announcements element should never be wider than the page. */ +.sb-announcement { + max-width: 100%; +} + +.sb-sidebar-primary, +.sb-sidebar-secondary { + flex-shrink: 0; + width: 17rem; +} + +.sb-announcement__inner { + justify-content: center; + + box-sizing: border-box; + height: 3rem; + + overflow-x: auto; + white-space: nowrap; +} + +/* Sidebars, with checkbox-based toggle */ +.sb-sidebar-primary, +.sb-sidebar-secondary { + position: fixed; + height: 100%; + top: 0; +} + +.sb-sidebar-primary { + left: -17rem; + transition: left 250ms ease-in-out; +} +.sb-sidebar-secondary { + right: -17rem; + transition: right 250ms ease-in-out; +} + +.sb-sidebar-toggle { + display: none; +} +.sb-sidebar-overlay { + position: fixed; + top: 0; + width: 0; + height: 0; + + transition: width 0ms ease 250ms, height 0ms ease 250ms, opacity 250ms ease; + + opacity: 0; + background-color: rgba(0, 0, 0, 0.54); +} + +#sb-sidebar-toggle--primary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--primary"], +#sb-sidebar-toggle--secondary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--secondary"] { + width: 100%; + height: 100%; + opacity: 1; + transition: width 0ms ease, height 0ms ease, opacity 250ms ease; +} + +#sb-sidebar-toggle--primary:checked ~ .sb-container .sb-sidebar-primary { + left: 0; +} +#sb-sidebar-toggle--secondary:checked ~ .sb-container .sb-sidebar-secondary { + right: 0; +} + +/* Full-width mode */ +.drop-secondary-sidebar-for-full-width-content + .hide-when-secondary-sidebar-shown { + display: none !important; +} +.drop-secondary-sidebar-for-full-width-content .sb-sidebar-secondary { + display: none !important; +} + +/* Mobile views */ +.sb-page-width { + width: 100%; +} + +.sb-article-container, +.sb-footer-content__inner, +.drop-secondary-sidebar-for-full-width-content .sb-article, +.drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 100vw; +} + +.sb-article, +.match-content-width { + padding: 0 1rem; + box-sizing: border-box; +} + +@media (min-width: 32rem) { + .sb-article, + .match-content-width { + padding: 0 2rem; + } +} + +/* Tablet views */ +@media (min-width: 42rem) { + .sb-article-container { + width: auto; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 42rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 46rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 46rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 50rem) { + 
.sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 50rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Tablet views */ +@media (min-width: 59rem) { + .sb-sidebar-secondary { + position: static; + } + .hide-when-secondary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 63rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 67rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Desktop views */ +@media (min-width: 76rem) { + .sb-sidebar-primary { + position: static; + } + .hide-when-primary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} + +/* Full desktop views */ +@media (min-width: 80rem) { + .sb-article, + .match-content-width { + width: 46rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } +} + +@media (min-width: 84rem) { + .sb-article, + .match-content-width { + width: 50rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } +} + +@media (min-width: 88rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-page-width { + width: 88rem; + } +} diff --git a/docs/_build/html/_static/sphinx_highlight.js b/docs/_build/html/_static/sphinx_highlight.js new file mode 100644 index 0000000..8a96c69 --- /dev/null +++ b/docs/_build/html/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '<p class="highlight-link">' + + '<a href="javascript:SphinxHighlight.hideSearchWords()">' + + _("Hide Search Matches") + + "</a>" + + "</p>" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. + */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/docs/_build/html/_static/styles/furo-extensions.css b/docs/_build/html/_static/styles/furo-extensions.css new file mode 100644 index 0000000..bc447f2 --- /dev/null +++ b/docs/_build/html/_static/styles/furo-extensions.css @@ -0,0 +1,2 @@ +#furo-sidebar-ad-placement{padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)}#furo-sidebar-ad-placement .ethical-sidebar{background:var(--color-background-secondary);border:none;box-shadow:none}#furo-sidebar-ad-placement .ethical-sidebar:hover{background:var(--color-background-hover)}#furo-sidebar-ad-placement .ethical-sidebar a{color:var(--color-foreground-primary)}#furo-sidebar-ad-placement .ethical-callout a{color:var(--color-foreground-secondary)!important}#furo-readthedocs-versions{background:transparent;display:block;position:static;width:100%}#furo-readthedocs-versions .rst-versions{background:#1a1c1e}#furo-readthedocs-versions .rst-current-version{background:var(--color-sidebar-item-background);cursor:unset}#furo-readthedocs-versions .rst-current-version:hover{background:var(--color-sidebar-item-background)}#furo-readthedocs-versions .rst-current-version .fa-book{color:var(--color-foreground-primary)}#furo-readthedocs-versions>.rst-other-versions{padding:0}#furo-readthedocs-versions>.rst-other-versions small{opacity:1}#furo-readthedocs-versions .injected .rst-versions{position:unset}#furo-readthedocs-versions:focus-within,#furo-readthedocs-versions:hover{box-shadow:0 0 0 1px var(--color-sidebar-background-border)}#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:hover .rst-current-version{background:#1a1c1e;font-size:inherit;height:auto;line-height:inherit;padding:12px;text-align:right}#furo-readthedocs-versions:focus-within .rst-current-version .fa-book,#furo-readthedocs-versions:hover .rst-current-version .fa-book{color:#fff;float:left}#furo-readthedocs-versions:focus-within .fa-caret-down,#furo-readthedocs-versions:hover
.fa-caret-down{display:none}#furo-readthedocs-versions:focus-within .injected,#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:focus-within .rst-other-versions,#furo-readthedocs-versions:hover .injected,#furo-readthedocs-versions:hover .rst-current-version,#furo-readthedocs-versions:hover .rst-other-versions{display:block}#furo-readthedocs-versions:focus-within>.rst-current-version,#furo-readthedocs-versions:hover>.rst-current-version{display:none}.highlight:hover button.copybtn{color:var(--color-code-foreground)}.highlight button.copybtn{align-items:center;background-color:var(--color-code-background);border:none;color:var(--color-background-item);cursor:pointer;height:1.25em;opacity:1;right:.5rem;top:.625rem;transition:color .3s,opacity .3s;width:1.25em}.highlight button.copybtn:hover{background-color:var(--color-code-background);color:var(--color-brand-content)}.highlight button.copybtn:after{background-color:transparent;color:var(--color-code-foreground);display:none}.highlight button.copybtn.success{color:#22863a;transition:color 0ms}.highlight button.copybtn.success:after{display:block}.highlight button.copybtn svg{padding:0}body{--sd-color-primary:var(--color-brand-primary);--sd-color-primary-highlight:var(--color-brand-content);--sd-color-primary-text:var(--color-background-primary);--sd-color-shadow:rgba(0,0,0,.05);--sd-color-card-border:var(--color-card-border);--sd-color-card-border-hover:var(--color-brand-content);--sd-color-card-background:var(--color-card-background);--sd-color-card-text:var(--color-foreground-primary);--sd-color-card-header:var(--color-card-marginals-background);--sd-color-card-footer:var(--color-card-marginals-background);--sd-color-tabs-label-active:var(--color-brand-content);--sd-color-tabs-label-hover:var(--color-foreground-muted);--sd-color-tabs-label-inactive:var(--color-foreground-muted);--sd-color-tabs-underline-active:var(--color-brand-content);--sd-color-tabs-underline-hover:var(--color-foreground-border);--sd-color-tabs-underline-inactive:var(--color-background-border);--sd-color-tabs-overline:var(--color-background-border);--sd-color-tabs-underline:var(--color-background-border)}.sd-tab-content{box-shadow:0 -2px var(--sd-color-tabs-overline),0 1px var(--sd-color-tabs-underline)}.sd-card{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)}.sd-shadow-sm{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-md{box-shadow:0 .3rem .75rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-lg{box-shadow:0 .6rem 1.5rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-card-hover:hover{transform:none}.sd-cards-carousel{gap:.25rem;padding:.25rem}body{--tabs--label-text:var(--color-foreground-muted);--tabs--label-text--hover:var(--color-foreground-muted);--tabs--label-text--active:var(--color-brand-content);--tabs--label-text--active--hover:var(--color-brand-content);--tabs--label-background:transparent;--tabs--label-background--hover:transparent;--tabs--label-background--active:transparent;--tabs--label-background--active--hover:transparent;--tabs--padding-x:0.25em;--tabs--margin-x:1em;--tabs--border:var(--color-background-border);--tabs--label-border:transparent;--tabs--label-border--hover:var(--color-foreground-muted);--tabs--label-border--active:var(--color-brand-content);--tabs--label-border--active--hover:var(--color-brand-content)}[role=main] 
.container{max-width:none;padding-left:0;padding-right:0}.shadow.docutils{border:none;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)!important}.sphinx-bs .card{background-color:var(--color-background-secondary);color:var(--color-foreground)} +/*# sourceMappingURL=furo-extensions.css.map*/ \ No newline at end of file diff --git a/docs/_build/html/_static/styles/furo-extensions.css.map b/docs/_build/html/_static/styles/furo-extensions.css.map new file mode 100644 index 0000000..9ba5637 --- /dev/null +++ b/docs/_build/html/_static/styles/furo-extensions.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/furo-extensions.css","mappings":"AAGA,2BACE,oFACA,4CAKE,6CAHA,YACA,eAEA,CACA,kDACE,yCAEF,8CACE,sCAEJ,8CACE,kDAEJ,2BAGE,uBACA,cAHA,gBACA,UAEA,CAGA,yCACE,mBAEF,gDAEE,gDADA,YACA,CACA,sDACE,gDACF,yDACE,sCAEJ,+CACE,UACA,qDACE,UAGF,mDACE,eAEJ,yEAEE,4DAEA,mHASE,mBAPA,kBAEA,YADA,oBAGA,aADA,gBAIA,CAEA,qIAEE,WADA,UACA,CAEJ,uGACE,aAEF,iUAGE,cAEF,mHACE,aC1EJ,gCACE,mCAEF,0BAKE,mBAUA,8CACA,YAFA,mCAKA,eAZA,cALA,UASA,YADA,YAYA,iCAdA,YAcA,CAEA,gCAEE,8CADA,gCACA,CAEF,gCAGE,6BADA,mCADA,YAEA,CAEF,kCAEE,cADA,oBACA,CACA,wCACE,cAEJ,8BACE,UC5CN,KAEE,6CAA8C,CAC9C,uDAAwD,CACxD,uDAAwD,CAGxD,iCAAsC,CAGtC,+CAAgD,CAChD,uDAAwD,CACxD,uDAAwD,CACxD,oDAAqD,CACrD,6DAA8D,CAC9D,6DAA8D,CAG9D,uDAAwD,CACxD,yDAA0D,CAC1D,4DAA6D,CAC7D,2DAA4D,CAC5D,8DAA+D,CAC/D,iEAAkE,CAClE,uDAAwD,CACxD,wDAAyD,CAG3D,gBACE,qFAGF,SACE,6EAEF,cACE,uFAEF,cACE,uFAEF,cACE,uFAGF,qBACE,eAEF,mBACE,WACA,eChDF,KACE,gDAAiD,CACjD,uDAAwD,CACxD,qDAAsD,CACtD,4DAA6D,CAC7D,oCAAqC,CACrC,2CAA4C,CAC5C,4CAA6C,CAC7C,mDAAoD,CACpD,wBAAyB,CACzB,oBAAqB,CACrB,6CAA8C,CAC9C,gCAAiC,CACjC,yDAA0D,CAC1D,uDAAwD,CACxD,8DAA+D,CCbjE,uBACE,eACA,eACA,gBAGF,iBACE,YACA,+EAGF,iBACE,mDACA","sources":["webpack:///./src/furo/assets/styles/extensions/_readthedocs.sass","webpack:///./src/furo/assets/styles/extensions/_copybutton.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-design.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-inline-tabs.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-panels.sass"],"sourcesContent":["// This file contains the styles used for tweaking how ReadTheDoc's embedded\n// contents would show up inside the theme.\n\n#furo-sidebar-ad-placement\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n .ethical-sidebar\n // Remove the border and box-shadow.\n border: none\n box-shadow: none\n // Manage the background colors.\n background: var(--color-background-secondary)\n &:hover\n background: var(--color-background-hover)\n // Ensure the text is legible.\n a\n color: var(--color-foreground-primary)\n\n .ethical-callout a\n color: var(--color-foreground-secondary) !important\n\n#furo-readthedocs-versions\n position: static\n width: 100%\n background: transparent\n display: block\n\n // Make the background color fit with the theme's aesthetic.\n .rst-versions\n background: rgb(26, 28, 30)\n\n .rst-current-version\n cursor: unset\n background: var(--color-sidebar-item-background)\n &:hover\n background: var(--color-sidebar-item-background)\n .fa-book\n color: var(--color-foreground-primary)\n\n > .rst-other-versions\n padding: 0\n small\n opacity: 1\n\n .injected\n .rst-versions\n position: unset\n\n &:hover,\n &:focus-within\n box-shadow: 0 0 0 1px var(--color-sidebar-background-border)\n\n .rst-current-version\n // Undo the tweaks done in RTD's CSS\n font-size: inherit\n line-height: inherit\n height: auto\n text-align: right\n padding: 12px\n\n // Match the rest of the body\n 
background: #1a1c1e\n\n .fa-book\n float: left\n color: white\n\n .fa-caret-down\n display: none\n\n .rst-current-version,\n .rst-other-versions,\n .injected\n display: block\n\n > .rst-current-version\n display: none\n",".highlight\n &:hover button.copybtn\n color: var(--color-code-foreground)\n\n button.copybtn\n // Make it visible\n opacity: 1\n\n // Align things correctly\n align-items: center\n\n height: 1.25em\n width: 1.25em\n\n top: 0.625rem // $code-spacing-vertical\n right: 0.5rem\n\n // Make it look better\n color: var(--color-background-item)\n background-color: var(--color-code-background)\n border: none\n\n // Change to cursor to make it obvious that you can click on it\n cursor: pointer\n\n // Transition smoothly, for aesthetics\n transition: color 300ms, opacity 300ms\n\n &:hover\n color: var(--color-brand-content)\n background-color: var(--color-code-background)\n\n &::after\n display: none\n color: var(--color-code-foreground)\n background-color: transparent\n\n &.success\n transition: color 0ms\n color: #22863a\n &::after\n display: block\n\n svg\n padding: 0\n","body\n // Colors\n --sd-color-primary: var(--color-brand-primary)\n --sd-color-primary-highlight: var(--color-brand-content)\n --sd-color-primary-text: var(--color-background-primary)\n\n // Shadows\n --sd-color-shadow: rgba(0, 0, 0, 0.05)\n\n // Cards\n --sd-color-card-border: var(--color-card-border)\n --sd-color-card-border-hover: var(--color-brand-content)\n --sd-color-card-background: var(--color-card-background)\n --sd-color-card-text: var(--color-foreground-primary)\n --sd-color-card-header: var(--color-card-marginals-background)\n --sd-color-card-footer: var(--color-card-marginals-background)\n\n // Tabs\n --sd-color-tabs-label-active: var(--color-brand-content)\n --sd-color-tabs-label-hover: var(--color-foreground-muted)\n --sd-color-tabs-label-inactive: var(--color-foreground-muted)\n --sd-color-tabs-underline-active: var(--color-brand-content)\n --sd-color-tabs-underline-hover: var(--color-foreground-border)\n --sd-color-tabs-underline-inactive: var(--color-background-border)\n --sd-color-tabs-overline: var(--color-background-border)\n --sd-color-tabs-underline: var(--color-background-border)\n\n// Tabs\n.sd-tab-content\n box-shadow: 0 -2px var(--sd-color-tabs-overline), 0 1px var(--sd-color-tabs-underline)\n\n// Shadows\n.sd-card // Have a shadow by default\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n.sd-shadow-sm\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-md\n box-shadow: 0 0.3rem 0.75rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-lg\n box-shadow: 0 0.6rem 1.5rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Cards\n.sd-card-hover:hover // Don't change scale on hover\n transform: none\n\n.sd-cards-carousel // Have a bit of gap in the carousel by default\n gap: 0.25rem\n padding: 0.25rem\n","// This file contains styles to tweak sphinx-inline-tabs to work well with Furo.\n\nbody\n --tabs--label-text: var(--color-foreground-muted)\n --tabs--label-text--hover: var(--color-foreground-muted)\n --tabs--label-text--active: var(--color-brand-content)\n --tabs--label-text--active--hover: var(--color-brand-content)\n --tabs--label-background: transparent\n --tabs--label-background--hover: transparent\n --tabs--label-background--active: transparent\n --tabs--label-background--active--hover: transparent\n --tabs--padding-x: 
0.25em\n --tabs--margin-x: 1em\n --tabs--border: var(--color-background-border)\n --tabs--label-border: transparent\n --tabs--label-border--hover: var(--color-foreground-muted)\n --tabs--label-border--active: var(--color-brand-content)\n --tabs--label-border--active--hover: var(--color-brand-content)\n","// This file contains styles to tweak sphinx-panels to work well with Furo.\n\n// sphinx-panels includes Bootstrap 4, which uses .container which can conflict\n// with docutils' `.. container::` directive.\n[role=\"main\"] .container\n max-width: initial\n padding-left: initial\n padding-right: initial\n\n// Make the panels look nicer!\n.shadow.docutils\n border: none\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Make panel colors respond to dark mode\n.sphinx-bs .card\n background-color: var(--color-background-secondary)\n color: var(--color-foreground)\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/docs/_build/html/_static/styles/furo.css b/docs/_build/html/_static/styles/furo.css new file mode 100644 index 0000000..3d29a21 --- /dev/null +++ b/docs/_build/html/_static/styles/furo.css @@ -0,0 +1,2 @@ +/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{-webkit-text-size-adjust:100%;line-height:1.15}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}[hidden],template{display:none}@media print{.content-icon-container,.headerlink,.mobile-header,.related-pages{display:none!important}.highlight{border:.1pt solid 
var(--color-foreground-border)}a,blockquote,dl,ol,pre,table,ul{page-break-inside:avoid}caption,figure,h1,h2,h3,h4,h5,h6,img{page-break-after:avoid;page-break-inside:avoid}dl,ol,ul{page-break-before:avoid}}.visually-hidden{clip:rect(0,0,0,0)!important;border:0!important;height:1px!important;margin:-1px!important;overflow:hidden!important;padding:0!important;position:absolute!important;white-space:nowrap!important;width:1px!important}:-moz-focusring{outline:auto}body{--font-stack:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji;--font-stack--monospace:"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace;--font-size--normal:100%;--font-size--small:87.5%;--font-size--small--2:81.25%;--font-size--small--3:75%;--font-size--small--4:62.5%;--sidebar-caption-font-size:var(--font-size--small--2);--sidebar-item-font-size:var(--font-size--small);--sidebar-search-input-font-size:var(--font-size--small);--toc-font-size:var(--font-size--small--3);--toc-font-size--mobile:var(--font-size--normal);--toc-title-font-size:var(--font-size--small--4);--admonition-font-size:0.8125rem;--admonition-title-font-size:0.8125rem;--code-font-size:var(--font-size--small--2);--api-font-size:var(--font-size--small);--header-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*4);--header-padding:0.5rem;--sidebar-tree-space-above:1.5rem;--sidebar-caption-space-above:1rem;--sidebar-item-line-height:1rem;--sidebar-item-spacing-vertical:0.5rem;--sidebar-item-spacing-horizontal:1rem;--sidebar-item-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*2);--sidebar-expander-width:var(--sidebar-item-height);--sidebar-search-space-above:0.5rem;--sidebar-search-input-spacing-vertical:0.5rem;--sidebar-search-input-spacing-horizontal:0.5rem;--sidebar-search-input-height:1rem;--sidebar-search-icon-size:var(--sidebar-search-input-height);--toc-title-padding:0.25rem 
0;--toc-spacing-vertical:1.5rem;--toc-spacing-horizontal:1.5rem;--toc-item-spacing-vertical:0.4rem;--toc-item-spacing-horizontal:1rem;--icon-search:url('data:image/svg+xml;charset=utf-8,');--icon-pencil:url('data:image/svg+xml;charset=utf-8,');--icon-abstract:url('data:image/svg+xml;charset=utf-8,');--icon-info:url('data:image/svg+xml;charset=utf-8,');--icon-flame:url('data:image/svg+xml;charset=utf-8,');--icon-question:url('data:image/svg+xml;charset=utf-8,');--icon-warning:url('data:image/svg+xml;charset=utf-8,');--icon-failure:url('data:image/svg+xml;charset=utf-8,');--icon-spark:url('data:image/svg+xml;charset=utf-8,');--color-admonition-title--caution:#ff9100;--color-admonition-title-background--caution:rgba(255,145,0,.2);--color-admonition-title--warning:#ff9100;--color-admonition-title-background--warning:rgba(255,145,0,.2);--color-admonition-title--danger:#ff5252;--color-admonition-title-background--danger:rgba(255,82,82,.2);--color-admonition-title--attention:#ff5252;--color-admonition-title-background--attention:rgba(255,82,82,.2);--color-admonition-title--error:#ff5252;--color-admonition-title-background--error:rgba(255,82,82,.2);--color-admonition-title--hint:#00c852;--color-admonition-title-background--hint:rgba(0,200,82,.2);--color-admonition-title--tip:#00c852;--color-admonition-title-background--tip:rgba(0,200,82,.2);--color-admonition-title--important:#00bfa5;--color-admonition-title-background--important:rgba(0,191,165,.2);--color-admonition-title--note:#00b0ff;--color-admonition-title-background--note:rgba(0,176,255,.2);--color-admonition-title--seealso:#448aff;--color-admonition-title-background--seealso:rgba(68,138,255,.2);--color-admonition-title--admonition-todo:grey;--color-admonition-title-background--admonition-todo:hsla(0,0%,50%,.2);--color-admonition-title:#651fff;--color-admonition-title-background:rgba(101,31,255,.2);--icon-admonition-default:var(--icon-abstract);--color-topic-title:#14b8a6;--color-topic-title-background:rgba(20,184,166,.2);--icon-topic-default:var(--icon-pencil);--color-problematic:#b30000;--color-foreground-primary:#000;--color-foreground-secondary:#5a5c63;--color-foreground-muted:#646776;--color-foreground-border:#878787;--color-background-primary:#fff;--color-background-secondary:#f8f9fb;--color-background-hover:#efeff4;--color-background-hover--transparent:#efeff400;--color-background-border:#eeebee;--color-background-item:#ccc;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2962ff;--color-brand-content:#2a5adf;--color-api-background:var(--color-background-hover--transparent);--color-api-background-hover:var(--color-background-hover);--color-api-overall:var(--color-foreground-secondary);--color-api-name:var(--color-problematic);--color-api-pre-name:var(--color-problematic);--color-api-paren:var(--color-foreground-secondary);--color-api-keyword:var(--color-foreground-primary);--color-highlight-on-target:#ffc;--color-inline-code-background:var(--color-background-secondary);--color-highlighted-background:#def;--color-highlighted-text:var(--color-foreground-primary);--color-guilabel-background:#ddeeff80;--color-guilabel-border:#bedaf580;--color-guilabel-text:var(--color-foreground-primary);--color-admonition-background:transparent;--color-table-header-background:var(--color-background-secondary);--color-table-border:var(--color-background-border);--color-card-border:var(--color-background-secondary);--color-card-background:transparent;--color-card-marginals-background:var(--color-background-se
condary);--color-header-background:var(--color-background-primary);--color-header-border:var(--color-background-border);--color-header-text:var(--color-foreground-primary);--color-sidebar-background:var(--color-background-secondary);--color-sidebar-background-border:var(--color-background-border);--color-sidebar-brand-text:var(--color-foreground-primary);--color-sidebar-caption-text:var(--color-foreground-muted);--color-sidebar-link-text:var(--color-foreground-secondary);--color-sidebar-link-text--top-level:var(--color-brand-primary);--color-sidebar-item-background:var(--color-sidebar-background);--color-sidebar-item-background--current:var( --color-sidebar-item-background );--color-sidebar-item-background--hover:linear-gradient(90deg,var(--color-background-hover--transparent) 0%,var(--color-background-hover) var(--sidebar-item-spacing-horizontal),var(--color-background-hover) 100%);--color-sidebar-item-expander-background:transparent;--color-sidebar-item-expander-background--hover:var( --color-background-hover );--color-sidebar-search-text:var(--color-foreground-primary);--color-sidebar-search-background:var(--color-background-secondary);--color-sidebar-search-background--focus:var(--color-background-primary);--color-sidebar-search-border:var(--color-background-border);--color-sidebar-search-icon:var(--color-foreground-muted);--color-toc-background:var(--color-background-primary);--color-toc-title-text:var(--color-foreground-muted);--color-toc-item-text:var(--color-foreground-secondary);--color-toc-item-text--hover:var(--color-foreground-primary);--color-toc-item-text--active:var(--color-brand-primary);--color-content-foreground:var(--color-foreground-primary);--color-content-background:transparent;--color-link:var(--color-brand-content);--color-link--hover:var(--color-brand-content);--color-link-underline:var(--color-background-border);--color-link-underline--hover:var(--color-foreground-border)}.only-light{display:block!important}html body .only-dark{display:none!important}@media not print{body[data-theme=dark]{--color-problematic:#ee5151;--color-foreground-primary:#ffffffcc;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2b8cee;--color-brand-content:#368ce2;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body[data-theme=dark] .only-light{display:none!important}body[data-theme=dark] 
.only-dark{display:block!important}@media(prefers-color-scheme:dark){body:not([data-theme=light]){--color-problematic:#ee5151;--color-foreground-primary:#ffffffcc;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2b8cee;--color-brand-content:#368ce2;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body:not([data-theme=light]) .only-light{display:none!important}body:not([data-theme=light]) .only-dark{display:block!important}}}body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto,body[data-theme=dark] .theme-toggle svg.theme-icon-when-dark,body[data-theme=light] .theme-toggle svg.theme-icon-when-light{display:block}body{font-family:var(--font-stack)}code,kbd,pre,samp{font-family:var(--font-stack--monospace)}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}article{line-height:1.5}h1,h2,h3,h4,h5,h6{border-radius:.5rem;font-weight:700;line-height:1.25;margin:.5rem -.5rem;padding-left:.5rem;padding-right:.5rem}h1+p,h2+p,h3+p,h4+p,h5+p,h6+p{margin-top:0}h1{font-size:2.5em;margin-bottom:1rem}h1,h2{margin-top:1.75rem}h2{font-size:2em}h3{font-size:1.5em}h4{font-size:1.25em}h5{font-size:1.125em}h6{font-size:1em}small{font-size:80%;opacity:75%}p{margin-bottom:.75rem;margin-top:.5rem}hr.docutils{background-color:var(--color-background-border);border:0;height:1px;margin:2rem 0;padding:0}.centered{text-align:center}a{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}a:hover{color:var(--color-link--hover);text-decoration-color:var(--color-link-underline--hover)}a.muted-link{color:inherit}a.muted-link:hover{color:var(--color-link);text-decoration-color:var(--color-link-underline--hover)}html{overflow-x:hidden;overflow-y:scroll;scroll-behavior:smooth}.sidebar-scroll,.toc-scroll,article[role=main] *{scrollbar-color:var(--color-foreground-border) transparent;scrollbar-width:thin}.sidebar-scroll::-webkit-scrollbar,.toc-scroll::-webkit-scrollbar,article[role=main] ::-webkit-scrollbar{height:.25rem;width:.25rem}.sidebar-scroll::-webkit-scrollbar-thumb,.toc-scroll::-webkit-scrollbar-thumb,article[role=main] ::-webkit-scrollbar-thumb{background-color:var(--color-foreground-border);border-radius:.125rem}body,html{background:var(--color-background-primary);color:var(--color-foreground-primary);height:100%}article{background:var(--color-content-background);color:var(--color-content-foreground);overflow-wrap:break-word}.page{display:flex;min-height:100%}.mobile-header{background-color:var(--color-header-background);border-bottom:1px solid var(--color-header-border);color:var(--color-header-text);display:none;height:var(--header-height);width:100%;z-index:10}.mobile-header.scrolled{border-bottom:none;box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2)}.mobile-header .header-center 
a{color:var(--color-header-text);text-decoration:none}.main{display:flex;flex:1}.sidebar-drawer{background:var(--color-sidebar-background);border-right:1px solid var(--color-sidebar-background-border);box-sizing:border-box;display:flex;justify-content:flex-end;min-width:15em;width:calc(50% - 26em)}.sidebar-container,.toc-drawer{box-sizing:border-box;width:15em}.toc-drawer{background:var(--color-toc-background);padding-right:1rem}.sidebar-sticky,.toc-sticky{display:flex;flex-direction:column;height:min(100%,100vh);height:100vh;position:sticky;top:0}.sidebar-scroll,.toc-scroll{flex-grow:1;flex-shrink:1;overflow:auto;scroll-behavior:smooth}.content{display:flex;flex-direction:column;justify-content:space-between;padding:0 3em;width:46em}.icon{display:inline-block;height:1rem;width:1rem}.icon svg{height:100%;width:100%}.announcement{align-items:center;background-color:var(--color-announcement-background);color:var(--color-announcement-text);display:flex;height:var(--header-height);overflow-x:auto}.announcement+.page{min-height:calc(100% - var(--header-height))}.announcement-content{box-sizing:border-box;min-width:100%;padding:.5rem;text-align:center;white-space:nowrap}.announcement-content a{color:var(--color-announcement-text);text-decoration-color:var(--color-announcement-text)}.announcement-content a:hover{color:var(--color-announcement-text);text-decoration-color:var(--color-link--hover)}.no-js .theme-toggle-container{display:none}.theme-toggle-container{vertical-align:middle}.theme-toggle{background:transparent;border:none;cursor:pointer;padding:0}.theme-toggle svg{color:var(--color-foreground-primary);display:none;height:1rem;vertical-align:middle;width:1rem}.theme-toggle-header{float:left;padding:1rem .5rem}.nav-overlay-icon,.toc-overlay-icon{cursor:pointer;display:none}.nav-overlay-icon .icon,.toc-overlay-icon .icon{color:var(--color-foreground-secondary);height:1rem;width:1rem}.nav-overlay-icon,.toc-header-icon{align-items:center;justify-content:center}.toc-content-icon{height:1.5rem;width:1.5rem}.content-icon-container{display:flex;float:right;gap:.5rem;margin-bottom:1rem;margin-left:1rem;margin-top:1.5rem}.content-icon-container .edit-this-page svg{color:inherit;height:1rem;width:1rem}.sidebar-toggle{display:none;position:absolute}.sidebar-toggle[name=__toc]{left:20px}.sidebar-toggle:checked{left:40px}.overlay{background-color:rgba(0,0,0,.54);height:0;opacity:0;position:fixed;top:0;transition:width 0ms,height 0ms,opacity .25s ease-out;width:0}.sidebar-overlay{z-index:20}.toc-overlay{z-index:40}.sidebar-drawer{transition:left .25s ease-in-out;z-index:30}.toc-drawer{transition:right .25s ease-in-out;z-index:50}#__navigation:checked~.sidebar-overlay{height:100%;opacity:1;width:100%}#__navigation:checked~.page .sidebar-drawer{left:0;top:0}#__toc:checked~.toc-overlay{height:100%;opacity:1;width:100%}#__toc:checked~.page .toc-drawer{right:0;top:0}.back-to-top{background:var(--color-background-primary);border-radius:1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 1px 0 hsla(220,9%,46%,.502);display:none;font-size:.8125rem;left:0;margin-left:50%;padding:.5rem .75rem .5rem .5rem;position:fixed;text-decoration:none;top:1rem;transform:translateX(-50%);z-index:10}.back-to-top svg{fill:currentColor;display:inline-block;height:1rem;width:1rem}.back-to-top span{margin-left:.25rem}.show-back-to-top .back-to-top{align-items:center;display:flex}@media(min-width:97em){html{font-size:110%}}@media(max-width:82em){.toc-content-icon{display:flex}.toc-drawer{border-left:1px solid 
var(--color-background-muted);height:100vh;position:fixed;right:-15em;top:0}.toc-tree{border-left:none;font-size:var(--toc-font-size--mobile)}.sidebar-drawer{width:calc(50% - 18.5em)}}@media(max-width:67em){.nav-overlay-icon{display:flex}.sidebar-drawer{height:100vh;left:-15em;position:fixed;top:0;width:15em}.toc-header-icon{display:flex}.theme-toggle-content,.toc-content-icon{display:none}.theme-toggle-header{display:block}.mobile-header{align-items:center;display:flex;justify-content:space-between;position:sticky;top:0}.mobile-header .header-left,.mobile-header .header-right{display:flex;height:var(--header-height);padding:0 var(--header-padding)}.mobile-header .header-left label,.mobile-header .header-right label{height:100%;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:100%}.nav-overlay-icon .icon,.theme-toggle svg{height:1.25rem;width:1.25rem}:target{scroll-margin-top:var(--header-height)}.back-to-top{top:calc(var(--header-height) + .5rem)}.page{flex-direction:column;justify-content:center}.content{margin-left:auto;margin-right:auto}}@media(max-width:52em){.content{overflow-x:auto;width:100%}}@media(max-width:46em){.content{padding:0 1em}article aside.sidebar{float:none;margin:1rem 0;width:100%}}.admonition,.topic{background:var(--color-admonition-background);border-radius:.2rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1);font-size:var(--admonition-font-size);margin:1rem auto;overflow:hidden;padding:0 .5rem .5rem;page-break-inside:avoid}.admonition>:nth-child(2),.topic>:nth-child(2){margin-top:0}.admonition>:last-child,.topic>:last-child{margin-bottom:0}.admonition p.admonition-title,p.topic-title{font-size:var(--admonition-title-font-size);font-weight:500;line-height:1.3;margin:0 -.5rem .5rem;padding:.4rem .5rem .4rem 2rem;position:relative}.admonition p.admonition-title:before,p.topic-title:before{content:"";height:1rem;left:.5rem;position:absolute;width:1rem}p.admonition-title{background-color:var(--color-admonition-title-background)}p.admonition-title:before{background-color:var(--color-admonition-title);-webkit-mask-image:var(--icon-admonition-default);mask-image:var(--icon-admonition-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}p.topic-title{background-color:var(--color-topic-title-background)}p.topic-title:before{background-color:var(--color-topic-title);-webkit-mask-image:var(--icon-topic-default);mask-image:var(--icon-topic-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}.admonition{border-left:.2rem solid 
var(--color-admonition-title)}.admonition.caution{border-left-color:var(--color-admonition-title--caution)}.admonition.caution>.admonition-title{background-color:var(--color-admonition-title-background--caution)}.admonition.caution>.admonition-title:before{background-color:var(--color-admonition-title--caution);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.warning{border-left-color:var(--color-admonition-title--warning)}.admonition.warning>.admonition-title{background-color:var(--color-admonition-title-background--warning)}.admonition.warning>.admonition-title:before{background-color:var(--color-admonition-title--warning);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.danger{border-left-color:var(--color-admonition-title--danger)}.admonition.danger>.admonition-title{background-color:var(--color-admonition-title-background--danger)}.admonition.danger>.admonition-title:before{background-color:var(--color-admonition-title--danger);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.attention{border-left-color:var(--color-admonition-title--attention)}.admonition.attention>.admonition-title{background-color:var(--color-admonition-title-background--attention)}.admonition.attention>.admonition-title:before{background-color:var(--color-admonition-title--attention);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.error{border-left-color:var(--color-admonition-title--error)}.admonition.error>.admonition-title{background-color:var(--color-admonition-title-background--error)}.admonition.error>.admonition-title:before{background-color:var(--color-admonition-title--error);-webkit-mask-image:var(--icon-failure);mask-image:var(--icon-failure)}.admonition.hint{border-left-color:var(--color-admonition-title--hint)}.admonition.hint>.admonition-title{background-color:var(--color-admonition-title-background--hint)}.admonition.hint>.admonition-title:before{background-color:var(--color-admonition-title--hint);-webkit-mask-image:var(--icon-question);mask-image:var(--icon-question)}.admonition.tip{border-left-color:var(--color-admonition-title--tip)}.admonition.tip>.admonition-title{background-color:var(--color-admonition-title-background--tip)}.admonition.tip>.admonition-title:before{background-color:var(--color-admonition-title--tip);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.important{border-left-color:var(--color-admonition-title--important)}.admonition.important>.admonition-title{background-color:var(--color-admonition-title-background--important)}.admonition.important>.admonition-title:before{background-color:var(--color-admonition-title--important);-webkit-mask-image:var(--icon-flame);mask-image:var(--icon-flame)}.admonition.note{border-left-color:var(--color-admonition-title--note)}.admonition.note>.admonition-title{background-color:var(--color-admonition-title-background--note)}.admonition.note>.admonition-title:before{background-color:var(--color-admonition-title--note);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition.seealso{border-left-color:var(--color-admonition-title--seealso)}.admonition.seealso>.admonition-title{background-color:var(--color-admonition-title-background--seealso)}.admonition.seealso>.admonition-title:before{background-color:var(--color-admonition-title--seealso);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.admonition-todo{border-left-color:var(--color-admonition-titl
e--admonition-todo)}.admonition.admonition-todo>.admonition-title{background-color:var(--color-admonition-title-background--admonition-todo)}.admonition.admonition-todo>.admonition-title:before{background-color:var(--color-admonition-title--admonition-todo);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition-todo>.admonition-title{text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd{margin-left:2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:first-child{margin-top:.125rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list,dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:last-child{margin-bottom:.75rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list>dt{font-size:var(--font-size--small);text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd:empty{margin-bottom:.5rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul{margin-left:-1.2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p:nth-child(2){margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p+p:last-child:empty{margin-bottom:0;margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{color:var(--color-api-overall)}.sig:not(.sig-inline){background:var(--color-api-background);border-radius:.25rem;font-family:var(--font-stack--monospace);font-size:var(--api-font-size);font-weight:700;margin-left:-.25rem;margin-right:-.25rem;padding:.25rem .5rem .25rem 3em;text-indent:-2.5em;transition:background .1s ease-out}.sig:not(.sig-inline):hover{background:var(--color-api-background-hover)}.sig:not(.sig-inline) a.reference .viewcode-link{font-weight:400;width:3.5rem}em.property{font-style:normal}em.property:first-child{color:var(--color-api-keyword)}.sig-name{color:var(--color-api-name)}.sig-prename{color:var(--color-api-pre-name);font-weight:400}.sig-paren{color:var(--color-api-paren)}.sig-param{font-style:normal}.versionmodified{font-style:italic}div.deprecated p,div.versionadded p,div.versionchanged p{margin-bottom:.125rem;margin-top:.125rem}.viewcode-back,.viewcode-link{float:right;text-align:right}.line-block{margin-bottom:.75rem;margin-top:.5rem}.line-block .line-block{margin-bottom:0;margin-top:0;padding-left:1rem}.code-block-caption,article p.caption,table>caption{font-size:var(--font-size--small);text-align:center}.toctree-wrapper.compound .caption,.toctree-wrapper.compound :not(.caption)>.caption-text{font-size:var(--font-size--small);margin-bottom:0;text-align:initial;text-transform:uppercase}.toctree-wrapper.compound>ul{margin-bottom:0;margin-top:0}.sig-inline,code.literal{background:var(--color-inline-code-background);border-radius:.2em;font-size:var(--font-size--small--2);padding:.1em .2em}pre.literal-block .sig-inline,pre.literal-block code.literal{font-size:inherit;padding:0}p .sig-inline,p code.literal{border:1px solid var(--color-background-border)}.sig-inline{font-family:var(--font-stack--monospace)}div[class*=" highlight-"],div[class^=highlight-]{display:flex;margin:1em 0}div[class*=" highlight-"] .table-wrapper,div[class^=highlight-] 
.table-wrapper,pre{margin:0;padding:0}pre{overflow:auto}article[role=main] .highlight pre{line-height:1.5}.highlight pre,pre.literal-block{font-size:var(--code-font-size);padding:.625rem .875rem}pre.literal-block{background-color:var(--color-code-background);border-radius:.2rem;color:var(--color-code-foreground);margin-bottom:1rem;margin-top:1rem}.highlight{border-radius:.2rem;width:100%}.highlight .gp,.highlight span.linenos{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.highlight .hll{display:block;margin-left:-.875rem;margin-right:-.875rem;padding-left:.875rem;padding-right:.875rem}.code-block-caption{background-color:var(--color-code-background);border-bottom:1px solid;border-radius:.25rem;border-bottom-left-radius:0;border-bottom-right-radius:0;border-color:var(--color-background-border);color:var(--color-code-foreground);display:flex;font-weight:300;padding:.625rem .875rem}.code-block-caption+div[class]{margin-top:0}.code-block-caption+div[class] pre{border-top-left-radius:0;border-top-right-radius:0}.highlighttable{display:block;width:100%}.highlighttable tbody{display:block}.highlighttable tr{display:flex}.highlighttable td.linenos{background-color:var(--color-code-background);border-bottom-left-radius:.2rem;border-top-left-radius:.2rem;color:var(--color-code-foreground);padding:.625rem 0 .625rem .875rem}.highlighttable .linenodiv{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;font-size:var(--code-font-size);padding-right:.875rem}.highlighttable td.code{display:block;flex:1;overflow:hidden;padding:0}.highlighttable td.code .highlight{border-bottom-left-radius:0;border-top-left-radius:0}.highlight span.linenos{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;display:inline-block;margin-right:.875rem;padding-left:0;padding-right:.875rem}.footnote-reference{font-size:var(--font-size--small--4);vertical-align:super}dl.footnote.brackets{color:var(--color-foreground-secondary);display:grid;font-size:var(--font-size--small);grid-template-columns:max-content auto}dl.footnote.brackets dt{margin:0}dl.footnote.brackets dt>.fn-backref{margin-left:.25rem}dl.footnote.brackets dt:after{content:":"}dl.footnote.brackets dt .brackets:before{content:"["}dl.footnote.brackets dt .brackets:after{content:"]"}dl.footnote.brackets dd{margin:0;padding:0 1rem}aside.footnote{color:var(--color-foreground-secondary);font-size:var(--font-size--small)}aside.footnote>span,div.citation>span{float:left;font-weight:500;padding-right:.25rem}aside.footnote>p,div.citation>p{margin-left:2rem}img{box-sizing:border-box;height:auto;max-width:100%}article .figure,article figure{border-radius:.2rem;margin:0}article .figure :last-child,article figure :last-child{margin-bottom:0}article .align-left{clear:left;float:left;margin:0 1rem 1rem}article .align-right{clear:right;float:right;margin:0 1rem 1rem}article .align-center,article .align-default{display:block;margin-left:auto;margin-right:auto;text-align:center}article table.align-default{display:table;text-align:initial}.domainindex-jumpbox,.genindex-jumpbox{border-bottom:1px solid var(--color-background-border);border-top:1px solid var(--color-background-border);padding:.25rem}.domainindex-section h2,.genindex-section h2{margin-bottom:.5rem;margin-top:.75rem}.domainindex-section ul,.genindex-section ul{margin-bottom:0;margin-top:0}ol,ul{margin-bottom:1rem;margin-top:1rem;padding-left:1.2rem}ol li>p:first-child,ul li>p:first-child{margin-bottom:.25rem;margin-top:.25rem}ol li>p:last-child,ul 
li>p:last-child{margin-top:.25rem}ol li>ol,ol li>ul,ul li>ol,ul li>ul{margin-bottom:.5rem;margin-top:.5rem}ol.arabic{list-style:decimal}ol.loweralpha{list-style:lower-alpha}ol.upperalpha{list-style:upper-alpha}ol.lowerroman{list-style:lower-roman}ol.upperroman{list-style:upper-roman}.simple li>ol,.simple li>ul,.toctree-wrapper li>ol,.toctree-wrapper li>ul{margin-bottom:0;margin-top:0}.field-list dt,.option-list dt,dl.footnote dt,dl.glossary dt,dl.simple dt,dl:not([class]) dt{font-weight:500;margin-top:.25rem}.field-list dt+dt,.option-list dt+dt,dl.footnote dt+dt,dl.glossary dt+dt,dl.simple dt+dt,dl:not([class]) dt+dt{margin-top:0}.field-list dt .classifier:before,.option-list dt .classifier:before,dl.footnote dt .classifier:before,dl.glossary dt .classifier:before,dl.simple dt .classifier:before,dl:not([class]) dt .classifier:before{content:":";margin-left:.2rem;margin-right:.2rem}.field-list dd ul,.field-list dd>p:first-child,.option-list dd ul,.option-list dd>p:first-child,dl.footnote dd ul,dl.footnote dd>p:first-child,dl.glossary dd ul,dl.glossary dd>p:first-child,dl.simple dd ul,dl.simple dd>p:first-child,dl:not([class]) dd ul,dl:not([class]) dd>p:first-child{margin-top:.125rem}.field-list dd ul,.option-list dd ul,dl.footnote dd ul,dl.glossary dd ul,dl.simple dd ul,dl:not([class]) dd ul{margin-bottom:.125rem}.math-wrapper{overflow-x:auto;width:100%}div.math{position:relative;text-align:center}div.math .headerlink,div.math:focus .headerlink{display:none}div.math:hover .headerlink{display:inline-block}div.math span.eqno{position:absolute;right:.5rem;top:50%;transform:translateY(-50%);z-index:1}abbr[title]{cursor:help}.problematic{color:var(--color-problematic)}kbd:not(.compound){background-color:var(--color-background-secondary);border:1px solid var(--color-foreground-border);border-radius:.2rem;box-shadow:0 .0625rem 0 rgba(0,0,0,.2),inset 0 0 0 .125rem var(--color-background-primary);color:var(--color-foreground-primary);display:inline-block;font-size:var(--font-size--small--3);margin:0 .2rem;padding:0 .2rem;vertical-align:text-bottom}blockquote{background:var(--color-background-secondary);border-left:4px solid var(--color-background-border);margin-left:0;margin-right:0;padding:.5rem 1rem}blockquote .attribution{font-weight:600;text-align:right}blockquote.highlights,blockquote.pull-quote{font-size:1.25em}blockquote.epigraph,blockquote.pull-quote{border-left-width:0;border-radius:.5rem}blockquote.highlights{background:transparent;border-left-width:0}p .reference img{vertical-align:middle}p.rubric{font-size:1.125em;font-weight:700;line-height:1.25}dd p.rubric{font-size:var(--font-size--small);font-weight:inherit;line-height:inherit;text-transform:uppercase}article .sidebar{background-color:var(--color-background-secondary);border:1px solid var(--color-background-border);border-radius:.2rem;clear:right;float:right;margin-left:1rem;margin-right:0;width:30%}article .sidebar>*{padding-left:1rem;padding-right:1rem}article .sidebar>ol,article .sidebar>ul{padding-left:2.2rem}article .sidebar .sidebar-title{border-bottom:1px solid var(--color-background-border);font-weight:500;margin:0;padding:.5rem 1rem}.table-wrapper{margin-bottom:.5rem;margin-top:1rem;overflow-x:auto;padding:.2rem .2rem .75rem;width:100%}table.docutils{border-collapse:collapse;border-radius:.2rem;border-spacing:0;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)}table.docutils th{background:var(--color-table-header-background)}table.docutils td,table.docutils th{border-bottom:1px solid 
var(--color-table-border);border-left:1px solid var(--color-table-border);border-right:1px solid var(--color-table-border);padding:0 .25rem}table.docutils td p,table.docutils th p{margin:.25rem}table.docutils td:first-child,table.docutils th:first-child{border-left:none}table.docutils td:last-child,table.docutils th:last-child{border-right:none}table.docutils td.text-left,table.docutils th.text-left{text-align:left}table.docutils td.text-right,table.docutils th.text-right{text-align:right}table.docutils td.text-center,table.docutils th.text-center{text-align:center}:target{scroll-margin-top:.5rem}@media(max-width:67em){:target{scroll-margin-top:calc(.5rem + var(--header-height))}section>span:target{scroll-margin-top:calc(.8rem + var(--header-height))}}.headerlink{font-weight:100;-webkit-user-select:none;-moz-user-select:none;user-select:none}.code-block-caption>.headerlink,dl dt>.headerlink,figcaption p>.headerlink,h1>.headerlink,h2>.headerlink,h3>.headerlink,h4>.headerlink,h5>.headerlink,h6>.headerlink,p.caption>.headerlink,table>caption>.headerlink{margin-left:.5rem;visibility:hidden}.code-block-caption:hover>.headerlink,dl dt:hover>.headerlink,figcaption p:hover>.headerlink,h1:hover>.headerlink,h2:hover>.headerlink,h3:hover>.headerlink,h4:hover>.headerlink,h5:hover>.headerlink,h6:hover>.headerlink,p.caption:hover>.headerlink,table>caption:hover>.headerlink{visibility:visible}.code-block-caption>.toc-backref,dl dt>.toc-backref,figcaption p>.toc-backref,h1>.toc-backref,h2>.toc-backref,h3>.toc-backref,h4>.toc-backref,h5>.toc-backref,h6>.toc-backref,p.caption>.toc-backref,table>caption>.toc-backref{color:inherit;text-decoration-line:none}figure:hover>figcaption>p>.headerlink,table:hover>caption>.headerlink{visibility:visible}:target>h1:first-of-type,:target>h2:first-of-type,:target>h3:first-of-type,:target>h4:first-of-type,:target>h5:first-of-type,:target>h6:first-of-type,span:target~h1:first-of-type,span:target~h2:first-of-type,span:target~h3:first-of-type,span:target~h4:first-of-type,span:target~h5:first-of-type,span:target~h6:first-of-type{background-color:var(--color-highlight-on-target)}:target>h1:first-of-type code.literal,:target>h2:first-of-type code.literal,:target>h3:first-of-type code.literal,:target>h4:first-of-type code.literal,:target>h5:first-of-type code.literal,:target>h6:first-of-type code.literal,span:target~h1:first-of-type code.literal,span:target~h2:first-of-type code.literal,span:target~h3:first-of-type code.literal,span:target~h4:first-of-type code.literal,span:target~h5:first-of-type code.literal,span:target~h6:first-of-type code.literal{background-color:transparent}.literal-block-wrapper:target .code-block-caption,.this-will-duplicate-information-and-it-is-still-useful-here li :target,figure:target,table:target>caption{background-color:var(--color-highlight-on-target)}dt:target{background-color:var(--color-highlight-on-target)!important}.footnote-reference:target,.footnote>dt:target+dd{background-color:var(--color-highlight-on-target)}.guilabel{background-color:var(--color-guilabel-background);border:1px solid var(--color-guilabel-border);border-radius:.5em;color:var(--color-guilabel-text);font-size:.9em;padding:0 .3em}footer{display:flex;flex-direction:column;font-size:var(--font-size--small);margin-top:2rem}.bottom-of-page{align-items:center;border-top:1px solid 
var(--color-background-border);color:var(--color-foreground-secondary);display:flex;justify-content:space-between;line-height:1.5;margin-top:1rem;padding-bottom:1rem;padding-top:1rem}@media(max-width:46em){.bottom-of-page{flex-direction:column-reverse;gap:.25rem;text-align:center}}.bottom-of-page .left-details{font-size:var(--font-size--small)}.bottom-of-page .right-details{display:flex;flex-direction:column;gap:.25rem;text-align:right}.bottom-of-page .icons{display:flex;font-size:1rem;gap:.25rem;justify-content:flex-end}.bottom-of-page .icons a{text-decoration:none}.bottom-of-page .icons img,.bottom-of-page .icons svg{font-size:1.125rem;height:1em;width:1em}.related-pages a{align-items:center;display:flex;text-decoration:none}.related-pages a:hover .page-info .title{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}.related-pages a svg.furo-related-icon,.related-pages a svg.furo-related-icon>use{color:var(--color-foreground-border);flex-shrink:0;height:.75rem;margin:0 .5rem;width:.75rem}.related-pages a.next-page{clear:right;float:right;max-width:50%;text-align:right}.related-pages a.prev-page{clear:left;float:left;max-width:50%}.related-pages a.prev-page svg{transform:rotate(180deg)}.page-info{display:flex;flex-direction:column;overflow-wrap:anywhere}.next-page .page-info{align-items:flex-end}.page-info .context{align-items:center;color:var(--color-foreground-muted);display:flex;font-size:var(--font-size--small);padding-bottom:.1rem;text-decoration:none}ul.search{list-style:none;padding-left:0}ul.search li{border-bottom:1px solid var(--color-background-border);padding:1rem 0}[role=main] .highlighted{background-color:var(--color-highlighted-background);color:var(--color-highlighted-text)}.sidebar-brand{display:flex;flex-direction:column;flex-shrink:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none}.sidebar-brand-text{color:var(--color-sidebar-brand-text);font-size:1.5rem;overflow-wrap:break-word}.sidebar-brand-text,.sidebar-logo-container{margin:var(--sidebar-item-spacing-vertical) 0}.sidebar-logo{display:block;margin:0 auto;max-width:100%}.sidebar-search-container{align-items:center;background:var(--color-sidebar-search-background);display:flex;margin-top:var(--sidebar-search-space-above);position:relative}.sidebar-search-container:focus-within,.sidebar-search-container:hover{background:var(--color-sidebar-search-background--focus)}.sidebar-search-container:before{background-color:var(--color-sidebar-search-icon);content:"";height:var(--sidebar-search-icon-size);left:var(--sidebar-item-spacing-horizontal);-webkit-mask-image:var(--icon-search);mask-image:var(--icon-search);position:absolute;width:var(--sidebar-search-icon-size)}.sidebar-search{background:transparent;border:none;border-bottom:1px solid var(--color-sidebar-search-border);border-top:1px solid var(--color-sidebar-search-border);box-sizing:border-box;color:var(--color-sidebar-search-foreground);padding:var(--sidebar-search-input-spacing-vertical) var(--sidebar-search-input-spacing-horizontal) var(--sidebar-search-input-spacing-vertical) calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size));width:100%;z-index:10}.sidebar-search:focus{outline:none}.sidebar-search::-moz-placeholder{font-size:var(--sidebar-search-input-font-size)}.sidebar-search::placeholder{font-size:var(--sidebar-search-input-font-size)}#searchbox 
.highlight-link{margin:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0;text-align:center}#searchbox .highlight-link a{color:var(--color-sidebar-search-icon);font-size:var(--font-size--small--2)}.sidebar-tree{font-size:var(--sidebar-item-font-size);margin-bottom:var(--sidebar-item-spacing-vertical);margin-top:var(--sidebar-tree-space-above)}.sidebar-tree ul{display:flex;flex-direction:column;list-style:none;margin-bottom:0;margin-top:0;padding:0}.sidebar-tree li{margin:0;position:relative}.sidebar-tree li>ul{margin-left:var(--sidebar-item-spacing-horizontal)}.sidebar-tree .icon,.sidebar-tree .reference{color:var(--color-sidebar-link-text)}.sidebar-tree .reference{box-sizing:border-box;display:inline-block;height:100%;line-height:var(--sidebar-item-line-height);overflow-wrap:anywhere;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none;width:100%}.sidebar-tree .reference:hover{background:var(--color-sidebar-item-background--hover)}.sidebar-tree .reference.external:after{color:var(--color-sidebar-link-text);content:url("data:image/svg+xml;charset=utf-8,%3Csvg width='12' height='12' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' stroke-width='1.5' stroke='%23607D8B' fill='none' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M0 0h24v24H0z' stroke='none'/%3E%3Cpath d='M11 7H6a2 2 0 0 0-2 2v9a2 2 0 0 0 2 2h9a2 2 0 0 0 2-2v-5M10 14 20 4M15 4h5v5'/%3E%3C/svg%3E");margin:0 .25rem;vertical-align:middle}.sidebar-tree .current-page>.reference{font-weight:700}.sidebar-tree label{align-items:center;cursor:pointer;display:flex;height:var(--sidebar-item-height);justify-content:center;position:absolute;right:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:var(--sidebar-expander-width)}.sidebar-tree .caption,.sidebar-tree :not(.caption)>.caption-text{color:var(--color-sidebar-caption-text);font-size:var(--sidebar-caption-font-size);font-weight:700;margin:var(--sidebar-caption-space-above) 0 0 0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-transform:uppercase}.sidebar-tree li.has-children>.reference{padding-right:var(--sidebar-expander-width)}.sidebar-tree .toctree-l1>.reference,.sidebar-tree .toctree-l1>label .icon{color:var(--color-sidebar-link-text--top-level)}.sidebar-tree label{background:var(--color-sidebar-item-expander-background)}.sidebar-tree label:hover{background:var(--color-sidebar-item-expander-background--hover)}.sidebar-tree .current>.reference{background:var(--color-sidebar-item-background--current)}.sidebar-tree .current>.reference:hover{background:var(--color-sidebar-item-background--hover)}.toctree-checkbox{display:none;position:absolute}.toctree-checkbox~ul{display:none}.toctree-checkbox~label .icon svg{transform:rotate(90deg)}.toctree-checkbox:checked~ul{display:block}.toctree-checkbox:checked~label .icon svg{transform:rotate(-90deg)}.toc-title-container{padding:var(--toc-title-padding);padding-top:var(--toc-spacing-vertical)}.toc-title{color:var(--color-toc-title-text);font-size:var(--toc-title-font-size);padding-left:var(--toc-spacing-horizontal);text-transform:uppercase}.no-toc{display:none}.toc-tree-container{padding-bottom:var(--toc-spacing-vertical)}.toc-tree{border-left:1px solid var(--color-background-border);font-size:var(--toc-font-size);line-height:1.3;padding-left:calc(var(--toc-spacing-horizontal) - 
var(--toc-item-spacing-horizontal))}.toc-tree>ul>li:first-child{padding-top:0}.toc-tree>ul>li:first-child>ul{padding-left:0}.toc-tree>ul>li:first-child>a{display:none}.toc-tree ul{list-style-type:none;margin-bottom:0;margin-top:0;padding-left:var(--toc-item-spacing-horizontal)}.toc-tree li{padding-top:var(--toc-item-spacing-vertical)}.toc-tree li.scroll-current>.reference{color:var(--color-toc-item-text--active);font-weight:700}.toc-tree .reference{color:var(--color-toc-item-text);overflow-wrap:anywhere;text-decoration:none}.toc-scroll{max-height:100vh;overflow-y:scroll}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here){background:rgba(255,0,0,.25);color:var(--color-problematic)}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here):before{content:"ERROR: Adding a table of contents in Furo-based documentation is unnecessary, and does not work well with existing styling.Add a 'this-will-duplicate-information-and-it-is-still-useful-here' class, if you want an escape hatch."}.text-align\:left>p{text-align:left}.text-align\:center>p{text-align:center}.text-align\:right>p{text-align:right} +/*# sourceMappingURL=furo.css.map*/ \ No newline at end of file diff --git a/docs/_build/html/_static/styles/furo.css.map b/docs/_build/html/_static/styles/furo.css.map new file mode 100644 index 0000000..d1dfb10 --- /dev/null +++ b/docs/_build/html/_static/styles/furo.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/furo.css","mappings":"AAAA,2EAA2E,CAU3E,KAEE,6BAA8B,CAD9B,gBAEF,CASA,KACE,QACF,CAMA,KACE,aACF,CAOA,GACE,aAAc,CACd,cACF,CAUA,GACE,sBAAuB,CACvB,QAAS,CACT,gBACF,CAOA,IACE,+BAAiC,CACjC,aACF,CASA,EACE,4BACF,CAOA,YACE,kBAAmB,CACnB,yBAA0B,CAC1B,gCACF,CAMA,SAEE,kBACF,CAOA,cAGE,+BAAiC,CACjC,aACF,CAeA,QAEE,aAAc,CACd,aAAc,CACd,iBAAkB,CAClB,uBACF,CAEA,IACE,aACF,CAEA,IACE,SACF,CASA,IACE,iBACF,CAUA,sCAKE,mBAAoB,CACpB,cAAe,CACf,gBAAiB,CACjB,QACF,CAOA,aAEE,gBACF,CAOA,cAEE,mBACF,CAMA,gDAIE,yBACF,CAMA,wHAIE,iBAAkB,CAClB,SACF,CAMA,4GAIE,6BACF,CAMA,SACE,0BACF,CASA,OACE,qBAAsB,CACtB,aAAc,CACd,aAAc,CACd,cAAe,CACf,SAAU,CACV,kBACF,CAMA,SACE,uBACF,CAMA,SACE,aACF,CAOA,6BAEE,qBAAsB,CACtB,SACF,CAMA,kFAEE,WACF,CAOA,cACE,4BAA6B,CAC7B,mBACF,CAMA,yCACE,uBACF,CAOA,6BACE,yBAA0B,CAC1B,YACF,CASA,QACE,aACF,CAMA,QACE,iBACF,CAiBA,kBACE,YACF,CCvVA,aAcE,kEACE,uBAOF,WACE,iDAMF,gCACE,wBAEF,qCAEE,uBADA,uBACA,CAEF,SACE,wBAtBA,CCpBJ,iBAOE,6BAEA,mBANA,qBAEA,sBACA,0BAFA,oBAHA,4BAOA,6BANA,mBAOA,CAEF,gBACE,aCPF,KCGE,mHAEA,wGAGA,wBAAyB,CACzB,wBAAyB,CACzB,4BAA6B,CAC7B,yBAA0B,CAC1B,2BAA4B,CAG5B,sDAAuD,CACvD,gDAAiD,CACjD,wDAAyD,CAGzD,0CAA2C,CAC3C,gDAAiD,CACjD,gDAAiD,CAKjD,gCAAiC,CACjC,sCAAuC,CAGvC,2CAA4C,CAG5C,uCAAwC,CChCxC,+FAGA,uBAAwB,CAGxB,iCAAkC,CAClC,kCAAmC,CAEnC,+BAAgC,CAChC,sCAAuC,CACvC,sCAAuC,CACvC,qGAIA,mDAAoD,CAEpD,mCAAoC,CACpC,8CAA+C,CAC/C,gDAAiD,CACjD,kCAAmC,CACnC,6DAA8D,CAG9D,6BAA8B,CAC9B,6BAA8B,CAC9B,+BAAgC,CAChC,kCAAmC,CACnC,kCAAmC,CCPjC,ukBCYA,srCAZF,kaCVA,mLAOA,oTAWA,2UAaA,0CACA,gEACA,0CAGA,gEAUA,yCACA,+DAGA,4CACA,CACA,iEAGA,sGACA,uCACA,4DAGA,sCACA,2DAEA,4CACA,kEACA,oGACA,CAEA,0GACA,+CAGA,+MAOA,+EACA,wCAIA,4DACA,sEACA,kEACA,sEACA,gDAGA,+DACA,0CACA,gEACA,gGACA,CAGA,2DACA,qDAGA,0CACA,8CACA,oDACA,oDL7GF,iCAEA,iEAME,oCKyGA,yDAIA,sCACA,kCACA,sDAGA,0CACA,kEACA,oDAEA,sDAGA,oCACA,oEAIA,CAGA,yDAGA,qDACA,oDAGA,6DAIA,iEAGA,2DAEA,2DL9IE,4DAEA,gEAIF,gEKgGA,gFAIA,oNAOA,qDAEA,gFAIA,4DAIA,oEAMA,yEAIA,6DACA,0DAGA,uDAGA,qDAEA,wDLpII,6DAEA,yDACE,2DAMN,uCAIA,yCACE,8CAGF,sDMjDA,6DAKA,oCAIA,4CACA,kBAGF,sBAMA,2BAME,qCAGA,qCAEA,iCAEA,+BAEA,mCAEA,qCAIA,CACA,gCACA,gDAKA,kCAIA,6BAEA,0CAQA,kCAIF,8BAGE,8BACA,uCAGF
,sCAKE,kCAEA,sDAGA,iCACE,CACA,2FAGA,gCACE,CACA,+DCzEJ,wCAEA,sBAEF,yDAEE,mCACA,wDAGA,2GAGA,wIACE,gDAMJ,kCAGE,6BACA,0CAGA,gEACA,8BACA,uCAKA,sCAIA,kCACA,sDACA,iCACA,sCAOA,sDAKE,gGAIE,+CAGN,sBAEE,yCAMA,0BAMA,yLAMA,aACA,MAEF,6BACE,2DAIF,wCAIE,kCAGA,SACA,kCAKA,mBAGA,CAJA,eACA,CAHF,gBAEE,CAWA,mBACA,mBACA,mDAGA,YACA,CACA,kBACA,CAEE,kBAKJ,OAPE,kBAQA,CADF,GACE,iCACA,wCAEA,wBACA,aACA,CAFA,WAEA,GACA,oBACA,CAFA,gBAEA,aACE,+CAIF,UAJE,kCAIF,WACA,iBACA,GAGA,uBACE,CAJF,yBAGA,CACE,iDACA,uCAEA,yDACE,cACA,wDAKN,yDAIE,uBAEF,kBACE,uBAEA,kDAIA,0DAGA,CAHA,oBAGA,0GAYA,aAEA,CAHA,YAGA,4HAKF,+CAGE,sBAEF,WAKE,0CAEA,CALA,qCAGA,CAJA,WAOA,SAIA,2CAJA,qCAIA,CACE,wBACA,OACA,YAEJ,gBACE,gBAIA,+CAKF,CAGE,kDAGA,CANF,8BAGE,CAGA,YAEA,CAdF,2BACE,CAHA,UAEF,CAYE,UAEA,CACA,0CACF,iEAOE,iCACA,8BAGA,wCAIA,wBAKE,0CAKF,CARE,6DAGA,CALF,qBAEE,CASA,YACA,yBAGA,CAEE,cAKN,CAPI,sBAOJ,gCAGE,qBAEA,WACA,aACA,sCAEA,mBACA,6BAGA,uEADA,qBACA,6BAIA,yBACA,qCAEE,UAEA,YACA,sBAEF,8BAGA,CAPE,aACA,WAMF,4BACE,sBACA,WAMJ,uBACE,cAYE,mBAXA,qDAKA,qCAGA,CAEA,YACA,CAHA,2BAEA,CACA,oCAEA,4CACA,uBAIA,oCAEJ,CAFI,cAIF,iBACE,CAHJ,kBAGI,yBAEA,oCAIA,qDAMF,mEAEA,CACE,8CAKA,gCAEA,qCAGA,oCAGE,sBACA,CAJF,WAEE,CAFF,eAEE,SAEA,mBACA,qCACE,aACA,CAFF,YADA,qBACA,WAEE,sBACA,kEAEN,2BAEE,iDAKA,uCAGF,CACE,0DAKA,kBACF,CAFE,sBAGA,mBACA,0BAEJ,yBAII,aADA,WACA,CAMF,UAFE,kBAEF,CAJF,gBACE,CAHE,iBAMF,6CC9ZF,yBACE,WACA,iBAEA,aAFA,iBAEA,6BAEA,kCACA,mBAKA,gCAGA,CARA,QAEA,CAGA,UALA,qBAEA,qDAGA,CALA,OAQA,4BACE,cAGF,2BACE,gCAEJ,CAHE,UAGF,8CAGE,CAHF,UAGE,wCAGA,qBACA,CAFA,UAEA,6CAGA,yCAIA,sBAHA,UAGA,kCACE,OACA,CAFF,KAEE,cAQF,0CACE,CAFF,kBACA,CACE,wEACA,CARA,YACA,CAKF,mBAFF,OAII,eACA,CAJF,iCAJE,cAGJ,CANI,oBAEA,CAKF,SAIE,2BADA,UACA,kBAGF,sCACA,CAFF,WACE,WACA,qCACE,gCACA,2EACA,sDAKJ,aACE,mDAII,CAJJ,6CAII,kEACA,iBACE,iDACA,+CACE,aACA,WADA,+BACA,uEANN,YACE,mDAEE,mBADF,0CACE,CADF,qBACE,0DACA,YACE,4DACA,sEANN,YACE,8CACA,kBADA,UACA,2CACE,2EACA,cACE,kEACA,mEANN,yBACE,4DACA,sBACE,+EAEE,iEACA,qEANN,sCACE,CAGE,iBAHF,gBAGE,qBACE,CAJJ,uBACA,gDACE,wDACA,6DAHF,2CACA,CADA,gBACA,eACE,CAGE,sBANN,8BACE,CAII,iBAFF,4DACA,WACE,YADF,uCACE,6EACA,2BANN,8CACE,kDACA,0CACE,8BACA,yFACE,sBACA,sFALJ,mEACA,sBACE,kEACA,6EACE,uCACA,kEALJ,qGAEE,kEACA,6EACE,uCACA,kEALJ,8CACA,uDACE,sEACA,2EACE,sCACA,iEALJ,mGACA,qCACE,oDACA,0DACE,6GACA,gDAGR,yDCrEA,sEACE,CACA,6GACE,gEACF,iGAIF,wFACE,qDAGA,mGAEE,2CAEF,4FACE,gCACF,wGACE,8DAEE,6FAIA,iJAKN,6GACE,gDAKF,yDACA,qCAGA,6BACA,kBACA,qDAKA,oCAEA,+DAGA,2CAGE,oDAIA,oEAEE,qBAGJ,wDAEE,uCAEF,kEAGA,8CAEA,uDAKA,oCAEA,yDAEE,gEAKF,+CC5FA,0EAGE,CACA,qDCLJ,+DAIE,sCAIA,kEACE,yBACA,2FAMA,gBACA,yGCbF,mBAOA,2MAIA,4HAYA,0DACE,8GAYF,8HAQE,mBAEA,6HAOF,YAGA,mIAME,eACA,CAFF,YAEE,4FAMJ,8BAEE,uBAYA,sCAEE,CAJF,oBAEA,CARA,wCAEA,CAHA,8BACA,CAFA,eACA,CAGA,wCAEA,CAEA,mDAIE,kCACE,6BACA,4CAKJ,kDAIA,eACE,aAGF,8BACE,uDACA,sCACA,cAEA,+BACA,CAFA,eAEA,wCAEF,YACE,iBACA,mCACA,0DAGF,qBAEE,CAFF,kBAEE,+BAIA,yCAEE,qBADA,gBACA,yBAKF,eACA,CAFF,YACE,CACA,iBACA,qDAEA,mDCvIJ,2FAOE,iCACA,CAEA,eACA,CAHA,kBAEA,CAFA,wBAGA,8BACA,eACE,CAFF,YAEE,0BACA,8CAGA,oBACE,oCAGA,kBACE,8DAEA,iBAEN,UACE,8BAIJ,+CAEE,qDAEF,kDAIE,YAEF,CAFE,YAEF,CCjCE,mFAJA,QACA,UAIE,CADF,iBACE,mCAGA,iDACE,+BAGF,wBAEA,mBAKA,6CAEF,CAHE,mBACA,CAEF,kCAIE,CARA,kBACA,CAFF,eASE,YACA,mBAGF,CAJE,UAIF,wCCjCA,oBDmCE,wBCpCJ,uCACE,8BACA,4CACA,oBAGA,2CCAA,6CAGE,CAPF,uBAIA,CDGA,gDACE,6BCVJ,CAWM,2CAEF,CAJA,kCAEE,CDJF,aCLF,gBDKE,uBCMA,gCAGA,gDAGE,wBAGJ,0BAEA,iBACE,aACF,CADE,UACF,uBACE,aACF,oBACE,YACF,4BACE,6CAMA,CAYF,6DAZE,mCAGE,iCASJ,4BAGE,4DADA,+BACA,CAFA,qBAEA,yBACE,aAEF,wBAHA,SAGA,iHACE,2DAKF,CANA,yCACE,CADF,oCAMA,uSAIA,sGACE,oDChEJ,WAEF,yBACE,QACA,eAEA,gBAEE,uCAGA,CALF,iCAKE,uCAGA,0BACA,CACA,oBACA,i
CClBJ,gBACE,KAGF,qBACE,YAGF,CAHE,cAGF,gCAEE,mBACA,iEAEA,oCACA,wCAEA,sBACA,WAEA,CAFA,YAEA,8EAEA,mCAFA,iBAEA,6BAIA,wEAKA,sDAIE,CARF,mDAIA,CAIE,cAEF,8CAIA,oBAFE,iBAEF,8CAGE,eAEF,CAFE,YAEF,OAEE,kBAGJ,CAJI,eACA,CAFF,mBAKF,yCCjDE,oBACA,CAFA,iBAEA,uCAKE,iBACA,qCAGA,mBCZJ,CDWI,gBCXJ,6BAEE,eACA,sBAGA,eAEA,sBACA,oDACA,iGAMA,gBAFE,YAEF,8FAME,iJClBF,YACA,gNAUE,6BAEF,oTAcI,kBACF,gHAIA,qBACE,eACF,qDACE,kBACF,6DACE,4BCxCJ,oBAEF,qCAEI,+CAGF,uBACE,uDAGJ,oBAkBE,mDAhBA,+CAaA,CAbA,oBAaA,0FAEE,CAFF,gGAbA,+BAaA,0BAGA,mQAIA,oNAEE,iBAGJ,CAHI,gBADA,gBAIJ,8CAYI,CAZJ,wCAYI,sVACE,iCAGA,uEAHA,QAGA,qXAKJ,iDAGF,CARM,+CACE,iDAIN,CALI,gBAQN,mHACE,gBAGF,2DACE,0EAOA,0EAKA,6EC/EA,iDACA,gCACA,oDAGA,qBACA,oDCFA,cACA,eAEA,yBAGF,sBAEE,iBACA,sNAWA,iBACE,kBACA,wRAgBA,kBAEA,iOAgBA,uCACE,uEAEA,kBAEF,qUAuBE,iDAIJ,CACA,geCxFF,4BAEE,CAQA,6JACA,iDAIA,sEAGA,mDAOF,iDAGE,4DAIA,8CACA,qDAEE,eAFF,cAEE,oBAEF,uBAFE,kCAGA,eACA,iBACA,mBAIA,mDACA,CAHA,uCAEA,CAJA,0CACA,CAIA,gBAJA,gBACA,oBADA,gBAIA,wBAEJ,gBAGE,6BACA,YAHA,iBAGA,gCACA,iEAEA,6CACA,sDACA,0BADA,wBACA,0BACA,oIAIA,mBAFA,YAEA,qBACA,0CAIE,uBAEF,CAHA,yBACE,CAEF,iDACE,mFAKJ,oCACE,CANE,aAKJ,CACE,qEAIA,YAFA,WAEA,CAHA,aACA,CAEA,gBACE,4BACA,sBADA,aACA,gCAMF,oCACA,yDACA,2CAEA,qBAGE,kBAEA,CACA,mCAIF,CARE,YACA,CAOF,iCAEE,CAPA,oBACA,CAQA,oBACE,uDAEJ,sDAGA,CAHA,cAGA,0BACE,oDAIA,oCACA,4BACA,sBAGA,cAEA,oFAGA,sBAEA,yDACE,CAIA,iBAJA,wBAIA,6CAJA,6CAOA,4BAGJ,CAHI,cAGJ,yCAGA,kBACE,CAIA,iDAEA,CATA,YAEF,CACE,4CAGA,kBAIA,wEAEA,wDAIF,kCAOE,iDACA,CARF,WAIE,sCAGA,CANA,2CACA,CAMA,oEARF,iBACE,CACA,qCAMA,iBAuBE,uBAlBF,YAKA,2DALA,uDAKA,CALA,sBAiBA,4CACE,CALA,gRAIF,YACE,UAEN,uBACE,YACA,mCAOE,+CAGA,8BAGF,+CAGA,4BCjNA,SDiNA,qFCjNA,gDAGA,sCACA,qCACA,sDAIF,CAIE,kDAGA,CAPF,0CAOE,kBAEA,kDAEA,CAHA,eACA,CAFA,YACA,CADA,SAIA,mHAIE,CAGA,6CAFA,oCAeE,CAbF,yBACE,qBAEJ,CAGE,oBACA,CAEA,YAFA,2CACF,CACE,uBAEA,mFAEE,CALJ,oBACE,CAEA,UAEE,gCAGF,sDAEA,yCC7CJ,oCAGA,CD6CE,yXAQE,sCCrDJ,wCAGA,oCACE","sources":["webpack:///./node_modules/normalize.css/normalize.css","webpack:///./src/furo/assets/styles/base/_print.sass","webpack:///./src/furo/assets/styles/base/_screen-readers.sass","webpack:///./src/furo/assets/styles/base/_theme.sass","webpack:///./src/furo/assets/styles/variables/_fonts.scss","webpack:///./src/furo/assets/styles/variables/_spacing.scss","webpack:///./src/furo/assets/styles/variables/_icons.scss","webpack:///./src/furo/assets/styles/variables/_admonitions.scss","webpack:///./src/furo/assets/styles/variables/_colors.scss","webpack:///./src/furo/assets/styles/base/_typography.sass","webpack:///./src/furo/assets/styles/_scaffold.sass","webpack:///./src/furo/assets/styles/content/_admonitions.sass","webpack:///./src/furo/assets/styles/content/_api.sass","webpack:///./src/furo/assets/styles/content/_blocks.sass","webpack:///./src/furo/assets/styles/content/_captions.sass","webpack:///./src/furo/assets/styles/content/_code.sass","webpack:///./src/furo/assets/styles/content/_footnotes.sass","webpack:///./src/furo/assets/styles/content/_images.sass","webpack:///./src/furo/assets/styles/content/_indexes.sass","webpack:///./src/furo/assets/styles/content/_lists.sass","webpack:///./src/furo/assets/styles/content/_math.sass","webpack:///./src/furo/assets/styles/content/_misc.sass","webpack:///./src/furo/assets/styles/content/_rubrics.sass","webpack:///./src/furo/assets/styles/content/_sidebar.sass","webpack:///./src/furo/assets/styles/content/_tables.sass","webpack:///./src/furo/assets/styles/content/_target.sass","webpack:///./src/furo/assets/styles/content/_gui-labels.sass","webpack:///./src/furo/assets/styles/components/_footer.sass","we
bpack:///./src/furo/assets/styles/components/_sidebar.sass","webpack:///./src/furo/assets/styles/components/_table_of_contents.sass","webpack:///./src/furo/assets/styles/_shame.sass"],"sourcesContent":["/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */\n\n/* Document\n ========================================================================== */\n\n/**\n * 1. Correct the line height in all browsers.\n * 2. Prevent adjustments of font size after orientation changes in iOS.\n */\n\nhtml {\n line-height: 1.15; /* 1 */\n -webkit-text-size-adjust: 100%; /* 2 */\n}\n\n/* Sections\n ========================================================================== */\n\n/**\n * Remove the margin in all browsers.\n */\n\nbody {\n margin: 0;\n}\n\n/**\n * Render the `main` element consistently in IE.\n */\n\nmain {\n display: block;\n}\n\n/**\n * Correct the font size and margin on `h1` elements within `section` and\n * `article` contexts in Chrome, Firefox, and Safari.\n */\n\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\n\n/* Grouping content\n ========================================================================== */\n\n/**\n * 1. Add the correct box sizing in Firefox.\n * 2. Show the overflow in Edge and IE.\n */\n\nhr {\n box-sizing: content-box; /* 1 */\n height: 0; /* 1 */\n overflow: visible; /* 2 */\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\npre {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/* Text-level semantics\n ========================================================================== */\n\n/**\n * Remove the gray background on active links in IE 10.\n */\n\na {\n background-color: transparent;\n}\n\n/**\n * 1. Remove the bottom border in Chrome 57-\n * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n */\n\nabbr[title] {\n border-bottom: none; /* 1 */\n text-decoration: underline; /* 2 */\n text-decoration: underline dotted; /* 2 */\n}\n\n/**\n * Add the correct font weight in Chrome, Edge, and Safari.\n */\n\nb,\nstrong {\n font-weight: bolder;\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\ncode,\nkbd,\nsamp {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/**\n * Add the correct font size in all browsers.\n */\n\nsmall {\n font-size: 80%;\n}\n\n/**\n * Prevent `sub` and `sup` elements from affecting the line height in\n * all browsers.\n */\n\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\n\nsub {\n bottom: -0.25em;\n}\n\nsup {\n top: -0.5em;\n}\n\n/* Embedded content\n ========================================================================== */\n\n/**\n * Remove the border on images inside links in IE 10.\n */\n\nimg {\n border-style: none;\n}\n\n/* Forms\n ========================================================================== */\n\n/**\n * 1. Change the font styles in all browsers.\n * 2. Remove the margin in Firefox and Safari.\n */\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n font-family: inherit; /* 1 */\n font-size: 100%; /* 1 */\n line-height: 1.15; /* 1 */\n margin: 0; /* 2 */\n}\n\n/**\n * Show the overflow in IE.\n * 1. Show the overflow in Edge.\n */\n\nbutton,\ninput { /* 1 */\n overflow: visible;\n}\n\n/**\n * Remove the inheritance of text transform in Edge, Firefox, and IE.\n * 1. 
Remove the inheritance of text transform in Firefox.\n */\n\nbutton,\nselect { /* 1 */\n text-transform: none;\n}\n\n/**\n * Correct the inability to style clickable types in iOS and Safari.\n */\n\nbutton,\n[type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\n\n/**\n * Remove the inner border and padding in Firefox.\n */\n\nbutton::-moz-focus-inner,\n[type=\"button\"]::-moz-focus-inner,\n[type=\"reset\"]::-moz-focus-inner,\n[type=\"submit\"]::-moz-focus-inner {\n border-style: none;\n padding: 0;\n}\n\n/**\n * Restore the focus styles unset by the previous rule.\n */\n\nbutton:-moz-focusring,\n[type=\"button\"]:-moz-focusring,\n[type=\"reset\"]:-moz-focusring,\n[type=\"submit\"]:-moz-focusring {\n outline: 1px dotted ButtonText;\n}\n\n/**\n * Correct the padding in Firefox.\n */\n\nfieldset {\n padding: 0.35em 0.75em 0.625em;\n}\n\n/**\n * 1. Correct the text wrapping in Edge and IE.\n * 2. Correct the color inheritance from `fieldset` elements in IE.\n * 3. Remove the padding so developers are not caught out when they zero out\n * `fieldset` elements in all browsers.\n */\n\nlegend {\n box-sizing: border-box; /* 1 */\n color: inherit; /* 2 */\n display: table; /* 1 */\n max-width: 100%; /* 1 */\n padding: 0; /* 3 */\n white-space: normal; /* 1 */\n}\n\n/**\n * Add the correct vertical alignment in Chrome, Firefox, and Opera.\n */\n\nprogress {\n vertical-align: baseline;\n}\n\n/**\n * Remove the default vertical scrollbar in IE 10+.\n */\n\ntextarea {\n overflow: auto;\n}\n\n/**\n * 1. Add the correct box sizing in IE 10.\n * 2. Remove the padding in IE 10.\n */\n\n[type=\"checkbox\"],\n[type=\"radio\"] {\n box-sizing: border-box; /* 1 */\n padding: 0; /* 2 */\n}\n\n/**\n * Correct the cursor style of increment and decrement buttons in Chrome.\n */\n\n[type=\"number\"]::-webkit-inner-spin-button,\n[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n/**\n * 1. Correct the odd appearance in Chrome and Safari.\n * 2. Correct the outline style in Safari.\n */\n\n[type=\"search\"] {\n -webkit-appearance: textfield; /* 1 */\n outline-offset: -2px; /* 2 */\n}\n\n/**\n * Remove the inner padding in Chrome and Safari on macOS.\n */\n\n[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n/**\n * 1. Correct the inability to style clickable types in iOS and Safari.\n * 2. 
Change font properties to `inherit` in Safari.\n */\n\n::-webkit-file-upload-button {\n -webkit-appearance: button; /* 1 */\n font: inherit; /* 2 */\n}\n\n/* Interactive\n ========================================================================== */\n\n/*\n * Add the correct display in Edge, IE 10+, and Firefox.\n */\n\ndetails {\n display: block;\n}\n\n/*\n * Add the correct display in all browsers.\n */\n\nsummary {\n display: list-item;\n}\n\n/* Misc\n ========================================================================== */\n\n/**\n * Add the correct display in IE 10+.\n */\n\ntemplate {\n display: none;\n}\n\n/**\n * Add the correct display in IE 10.\n */\n\n[hidden] {\n display: none;\n}\n","// This file contains styles for managing print media.\n\n////////////////////////////////////////////////////////////////////////////////\n// Hide elements not relevant to print media.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Hide icon container.\n .content-icon-container\n display: none !important\n\n // Hide showing header links if hovering over when printing.\n .headerlink\n display: none !important\n\n // Hide mobile header.\n .mobile-header\n display: none !important\n\n // Hide navigation links.\n .related-pages\n display: none !important\n\n////////////////////////////////////////////////////////////////////////////////\n// Tweaks related to decolorization.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Apply a border around code which no longer have a color background.\n .highlight\n border: 0.1pt solid var(--color-foreground-border)\n\n////////////////////////////////////////////////////////////////////////////////\n// Avoid page break in some relevant cases.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n ul, ol, dl, a, table, pre, blockquote\n page-break-inside: avoid\n\n h1, h2, h3, h4, h5, h6, img, figure, caption\n page-break-inside: avoid\n page-break-after: avoid\n\n ul, ol, dl\n page-break-before: avoid\n",".visually-hidden\n position: absolute !important\n width: 1px !important\n height: 1px !important\n padding: 0 !important\n margin: -1px !important\n overflow: hidden !important\n clip: rect(0,0,0,0) !important\n white-space: nowrap !important\n border: 0 !important\n\n:-moz-focusring\n outline: auto\n","// This file serves as the \"skeleton\" of the theming logic.\n//\n// This contains the bulk of the logic for handling dark mode, color scheme\n// toggling and the handling of color-scheme-specific hiding of elements.\n\nbody\n @include fonts\n @include spacing\n @include icons\n @include admonitions\n @include default-admonition(#651fff, \"abstract\")\n @include default-topic(#14B8A6, \"pencil\")\n\n @include colors\n\n.only-light\n display: block !important\nhtml body .only-dark\n display: none !important\n\n// Ignore dark-mode hints if print media.\n@media not print\n // Enable dark-mode, if requested.\n body[data-theme=\"dark\"]\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n // Enable dark mode, unless explicitly told to avoid.\n @media (prefers-color-scheme: dark)\n body:not([data-theme=\"light\"])\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n//\n// Theme toggle presentation\n//\nbody[data-theme=\"auto\"]\n .theme-toggle svg.theme-icon-when-auto\n display: 
block\n\nbody[data-theme=\"dark\"]\n .theme-toggle svg.theme-icon-when-dark\n display: block\n\nbody[data-theme=\"light\"]\n .theme-toggle svg.theme-icon-when-light\n display: block\n","// Fonts used by this theme.\n//\n// There are basically two things here -- using the system font stack and\n// defining sizes for various elements in %ages. We could have also used `em`\n// but %age is easier to reason about for me.\n\n@mixin fonts {\n // These are adapted from https://systemfontstack.com/\n --font-stack: -apple-system, BlinkMacSystemFont, Segoe UI, Helvetica, Arial,\n sans-serif, Apple Color Emoji, Segoe UI Emoji;\n --font-stack--monospace: \"SFMono-Regular\", Menlo, Consolas, Monaco,\n Liberation Mono, Lucida Console, monospace;\n\n --font-size--normal: 100%;\n --font-size--small: 87.5%;\n --font-size--small--2: 81.25%;\n --font-size--small--3: 75%;\n --font-size--small--4: 62.5%;\n\n // Sidebar\n --sidebar-caption-font-size: var(--font-size--small--2);\n --sidebar-item-font-size: var(--font-size--small);\n --sidebar-search-input-font-size: var(--font-size--small);\n\n // Table of Contents\n --toc-font-size: var(--font-size--small--3);\n --toc-font-size--mobile: var(--font-size--normal);\n --toc-title-font-size: var(--font-size--small--4);\n\n // Admonitions\n //\n // These aren't defined in terms of %ages, since nesting these is permitted.\n --admonition-font-size: 0.8125rem;\n --admonition-title-font-size: 0.8125rem;\n\n // Code\n --code-font-size: var(--font-size--small--2);\n\n // API\n --api-font-size: var(--font-size--small);\n}\n","// Spacing for various elements on the page\n//\n// If the user wants to tweak things in a certain way, they are permitted to.\n// They also have to deal with the consequences though!\n\n@mixin spacing {\n // Header!\n --header-height: calc(\n var(--sidebar-item-line-height) + 4 * #{var(--sidebar-item-spacing-vertical)}\n );\n --header-padding: 0.5rem;\n\n // Sidebar\n --sidebar-tree-space-above: 1.5rem;\n --sidebar-caption-space-above: 1rem;\n\n --sidebar-item-line-height: 1rem;\n --sidebar-item-spacing-vertical: 0.5rem;\n --sidebar-item-spacing-horizontal: 1rem;\n --sidebar-item-height: calc(\n var(--sidebar-item-line-height) + 2 *#{var(--sidebar-item-spacing-vertical)}\n );\n\n --sidebar-expander-width: var(--sidebar-item-height); // be square\n\n --sidebar-search-space-above: 0.5rem;\n --sidebar-search-input-spacing-vertical: 0.5rem;\n --sidebar-search-input-spacing-horizontal: 0.5rem;\n --sidebar-search-input-height: 1rem;\n --sidebar-search-icon-size: var(--sidebar-search-input-height);\n\n // Table of Contents\n --toc-title-padding: 0.25rem 0;\n --toc-spacing-vertical: 1.5rem;\n --toc-spacing-horizontal: 1.5rem;\n --toc-item-spacing-vertical: 0.4rem;\n --toc-item-spacing-horizontal: 1rem;\n}\n","// Expose theme icons as CSS variables.\n\n$icons: (\n // Adapted from tabler-icons\n // url: https://tablericons.com/\n \"search\":\n url('data:image/svg+xml;charset=utf-8,'),\n // Factored out from mkdocs-material on 24-Aug-2020.\n // url: https://squidfunk.github.io/mkdocs-material/reference/admonitions/\n \"pencil\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"abstract\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"info\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"flame\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"question\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"warning\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"failure\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"spark\":\n 
url('data:image/svg+xml;charset=utf-8,')\n);\n\n@mixin icons {\n @each $name, $glyph in $icons {\n --icon-#{$name}: #{$glyph};\n }\n}\n","// Admonitions\n\n// Structure of these is:\n// admonition-class: color \"icon-name\";\n//\n// The colors are translated into CSS variables below. The icons are\n// used directly in the main declarations to set the `mask-image` in\n// the title.\n\n// prettier-ignore\n$admonitions: (\n // Each of these has an reST directives for it.\n \"caution\": #ff9100 \"spark\",\n \"warning\": #ff9100 \"warning\",\n \"danger\": #ff5252 \"spark\",\n \"attention\": #ff5252 \"warning\",\n \"error\": #ff5252 \"failure\",\n \"hint\": #00c852 \"question\",\n \"tip\": #00c852 \"info\",\n \"important\": #00bfa5 \"flame\",\n \"note\": #00b0ff \"pencil\",\n \"seealso\": #448aff \"info\",\n \"admonition-todo\": #808080 \"pencil\"\n);\n\n@mixin default-admonition($color, $icon-name) {\n --color-admonition-title: #{$color};\n --color-admonition-title-background: #{rgba($color, 0.2)};\n\n --icon-admonition-default: var(--icon-#{$icon-name});\n}\n\n@mixin default-topic($color, $icon-name) {\n --color-topic-title: #{$color};\n --color-topic-title-background: #{rgba($color, 0.2)};\n\n --icon-topic-default: var(--icon-#{$icon-name});\n}\n\n@mixin admonitions {\n @each $name, $values in $admonitions {\n --color-admonition-title--#{$name}: #{nth($values, 1)};\n --color-admonition-title-background--#{$name}: #{rgba(\n nth($values, 1),\n 0.2\n )};\n }\n}\n","// Colors used throughout this theme.\n//\n// The aim is to give the user more control. Thus, instead of hard-coding colors\n// in various parts of the stylesheet, the approach taken is to define all\n// colors as CSS variables and reusing them in all the places.\n//\n// `colors-dark` depends on `colors` being included at a lower specificity.\n\n@mixin colors {\n --color-problematic: #b30000;\n\n // Base Colors\n --color-foreground-primary: black; // for main text and headings\n --color-foreground-secondary: #5a5c63; // for secondary text\n --color-foreground-muted: #646776; // for muted text\n --color-foreground-border: #878787; // for content borders\n\n --color-background-primary: white; // for content\n --color-background-secondary: #f8f9fb; // for navigation + ToC\n --color-background-hover: #efeff4ff; // for navigation-item hover\n --color-background-hover--transparent: #efeff400;\n --color-background-border: #eeebee; // for UI borders\n --color-background-item: #ccc; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #2962ff;\n --color-brand-content: #2a5adf;\n\n // API documentation\n --color-api-background: var(--color-background-hover--transparent);\n --color-api-background-hover: var(--color-background-hover);\n --color-api-overall: var(--color-foreground-secondary);\n --color-api-name: var(--color-problematic);\n --color-api-pre-name: var(--color-problematic);\n --color-api-paren: var(--color-foreground-secondary);\n --color-api-keyword: var(--color-foreground-primary);\n --color-highlight-on-target: #ffffcc;\n\n // Inline code background\n --color-inline-code-background: var(--color-background-secondary);\n\n // Highlighted text (search)\n --color-highlighted-background: #ddeeff;\n --color-highlighted-text: var(--color-foreground-primary);\n\n // GUI Labels\n --color-guilabel-background: #ddeeff80;\n --color-guilabel-border: #bedaf580;\n --color-guilabel-text: 
var(--color-foreground-primary);\n\n // Admonitions!\n --color-admonition-background: transparent;\n\n //////////////////////////////////////////////////////////////////////////////\n // Everything below this should be one of:\n // - var(...)\n // - *-gradient(...)\n // - special literal values (eg: transparent, none)\n //////////////////////////////////////////////////////////////////////////////\n\n // Tables\n --color-table-header-background: var(--color-background-secondary);\n --color-table-border: var(--color-background-border);\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: transparent;\n --color-card-marginals-background: var(--color-background-secondary);\n\n // Header\n --color-header-background: var(--color-background-primary);\n --color-header-border: var(--color-background-border);\n --color-header-text: var(--color-foreground-primary);\n\n // Sidebar (left)\n --color-sidebar-background: var(--color-background-secondary);\n --color-sidebar-background-border: var(--color-background-border);\n\n --color-sidebar-brand-text: var(--color-foreground-primary);\n --color-sidebar-caption-text: var(--color-foreground-muted);\n --color-sidebar-link-text: var(--color-foreground-secondary);\n --color-sidebar-link-text--top-level: var(--color-brand-primary);\n\n --color-sidebar-item-background: var(--color-sidebar-background);\n --color-sidebar-item-background--current: var(\n --color-sidebar-item-background\n );\n --color-sidebar-item-background--hover: linear-gradient(\n 90deg,\n var(--color-background-hover--transparent) 0%,\n var(--color-background-hover) var(--sidebar-item-spacing-horizontal),\n var(--color-background-hover) 100%\n );\n\n --color-sidebar-item-expander-background: transparent;\n --color-sidebar-item-expander-background--hover: var(\n --color-background-hover\n );\n\n --color-sidebar-search-text: var(--color-foreground-primary);\n --color-sidebar-search-background: var(--color-background-secondary);\n --color-sidebar-search-background--focus: var(--color-background-primary);\n --color-sidebar-search-border: var(--color-background-border);\n --color-sidebar-search-icon: var(--color-foreground-muted);\n\n // Table of Contents (right)\n --color-toc-background: var(--color-background-primary);\n --color-toc-title-text: var(--color-foreground-muted);\n --color-toc-item-text: var(--color-foreground-secondary);\n --color-toc-item-text--hover: var(--color-foreground-primary);\n --color-toc-item-text--active: var(--color-brand-primary);\n\n // Actual page contents\n --color-content-foreground: var(--color-foreground-primary);\n --color-content-background: transparent;\n\n // Links\n --color-link: var(--color-brand-content);\n --color-link--hover: var(--color-brand-content);\n --color-link-underline: var(--color-background-border);\n --color-link-underline--hover: var(--color-foreground-border);\n}\n\n@mixin colors-dark {\n --color-problematic: #ee5151;\n\n // Base Colors\n --color-foreground-primary: #ffffffcc; // for main text and headings\n --color-foreground-secondary: #9ca0a5; // for secondary text\n --color-foreground-muted: #81868d; // for muted text\n --color-foreground-border: #666666; // for content borders\n\n --color-background-primary: #131416; // for content\n --color-background-secondary: #1a1c1e; // for navigation + ToC\n --color-background-hover: #1e2124ff; // for navigation-item hover\n --color-background-hover--transparent: #1e212400;\n --color-background-border: #303335; // for UI borders\n 
--color-background-item: #444; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #2b8cee;\n --color-brand-content: #368ce2;\n\n // Highlighted text (search)\n --color-highlighted-background: #083563;\n\n // GUI Labels\n --color-guilabel-background: #08356380;\n --color-guilabel-border: #13395f80;\n\n // API documentation\n --color-api-keyword: var(--color-foreground-secondary);\n --color-highlight-on-target: #333300;\n\n // Admonitions\n --color-admonition-background: #18181a;\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: #18181a;\n --color-card-marginals-background: var(--color-background-hover);\n}\n","// This file contains the styling for making the content throughout the page,\n// including fonts, paragraphs, headings and spacing among these elements.\n\nbody\n font-family: var(--font-stack)\npre,\ncode,\nkbd,\nsamp\n font-family: var(--font-stack--monospace)\n\n// Make fonts look slightly nicer.\nbody\n -webkit-font-smoothing: antialiased\n -moz-osx-font-smoothing: grayscale\n\n// Line height from Bootstrap 4.1\narticle\n line-height: 1.5\n\n//\n// Headings\n//\nh1,\nh2,\nh3,\nh4,\nh5,\nh6\n line-height: 1.25\n font-weight: bold\n\n border-radius: 0.5rem\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n margin-left: -0.5rem\n margin-right: -0.5rem\n padding-left: 0.5rem\n padding-right: 0.5rem\n\n + p\n margin-top: 0\n\nh1\n font-size: 2.5em\n margin-top: 1.75rem\n margin-bottom: 1rem\nh2\n font-size: 2em\n margin-top: 1.75rem\nh3\n font-size: 1.5em\nh4\n font-size: 1.25em\nh5\n font-size: 1.125em\nh6\n font-size: 1em\n\nsmall\n opacity: 75%\n font-size: 80%\n\n// Paragraph\np\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n\n// Horizontal rules\nhr.docutils\n height: 1px\n padding: 0\n margin: 2rem 0\n background-color: var(--color-background-border)\n border: 0\n\n.centered\n text-align: center\n\n// Links\na\n text-decoration: underline\n\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n &:hover\n color: var(--color-link--hover)\n text-decoration-color: var(--color-link-underline--hover)\n &.muted-link\n color: inherit\n &:hover\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline--hover)\n","// This file contains the styles for the overall layouting of the documentation\n// skeleton, including the responsive changes as well as sidebar toggles.\n//\n// This is implemented as a mobile-last design, which isn't ideal, but it is\n// reasonably good-enough and I got pretty tired by the time I'd finished this\n// to move the rules around to fix this. Shouldn't take more than 3-4 hours,\n// if you know what you're doing tho.\n\n// HACK: Not all browsers account for the scrollbar width in media queries.\n// This results in horizontal scrollbars in the breakpoint where we go\n// from displaying everything to hiding the ToC. 
We accomodate for this by\n// adding a bit of padding to the TOC drawer, disabling the horizontal\n// scrollbar and allowing the scrollbars to cover the padding.\n// https://www.456bereastreet.com/archive/201301/media_query_width_and_vertical_scrollbars/\n\n// HACK: Always having the scrollbar visible, prevents certain browsers from\n// causing the content to stutter horizontally between taller-than-viewport and\n// not-taller-than-viewport pages.\n\nhtml\n overflow-x: hidden\n overflow-y: scroll\n scroll-behavior: smooth\n\n.sidebar-scroll, .toc-scroll, article[role=main] *\n // Override Firefox scrollbar style\n scrollbar-width: thin\n scrollbar-color: var(--color-foreground-border) transparent\n\n // Override Chrome scrollbar styles\n &::-webkit-scrollbar\n width: 0.25rem\n height: 0.25rem\n &::-webkit-scrollbar-thumb\n background-color: var(--color-foreground-border)\n border-radius: 0.125rem\n\n//\n// Overalls\n//\nhtml,\nbody\n height: 100%\n color: var(--color-foreground-primary)\n background: var(--color-background-primary)\n\narticle\n color: var(--color-content-foreground)\n background: var(--color-content-background)\n overflow-wrap: break-word\n\n.page\n display: flex\n // fill the viewport for pages with little content.\n min-height: 100%\n\n.mobile-header\n width: 100%\n height: var(--header-height)\n background-color: var(--color-header-background)\n color: var(--color-header-text)\n border-bottom: 1px solid var(--color-header-border)\n\n // Looks like sub-script/super-script have this, and we need this to\n // be \"on top\" of those.\n z-index: 10\n\n // We don't show the header on large screens.\n display: none\n\n // Add shadow when scrolled\n &.scrolled\n border-bottom: none\n box-shadow: 0 0 0.2rem rgba(0, 0, 0, 0.1), 0 0.2rem 0.4rem rgba(0, 0, 0, 0.2)\n\n .header-center\n a\n color: var(--color-header-text)\n text-decoration: none\n\n.main\n display: flex\n flex: 1\n\n// Sidebar (left) also covers the entire left portion of screen.\n.sidebar-drawer\n box-sizing: border-box\n\n border-right: 1px solid var(--color-sidebar-background-border)\n background: var(--color-sidebar-background)\n\n display: flex\n justify-content: flex-end\n // These next two lines took me two days to figure out.\n width: calc((100% - #{$full-width}) / 2 + #{$sidebar-width})\n min-width: $sidebar-width\n\n// Scroll-along sidebars\n.sidebar-container,\n.toc-drawer\n box-sizing: border-box\n width: $sidebar-width\n\n.toc-drawer\n background: var(--color-toc-background)\n // See HACK described on top of this document\n padding-right: 1rem\n\n.sidebar-sticky,\n.toc-sticky\n position: sticky\n top: 0\n height: min(100%, 100vh)\n height: 100vh\n\n display: flex\n flex-direction: column\n\n.sidebar-scroll,\n.toc-scroll\n flex-grow: 1\n flex-shrink: 1\n\n overflow: auto\n scroll-behavior: smooth\n\n// Central items.\n.content\n padding: 0 $content-padding\n width: $content-width\n\n display: flex\n flex-direction: column\n justify-content: space-between\n\n.icon\n display: inline-block\n height: 1rem\n width: 1rem\n svg\n width: 100%\n height: 100%\n\n//\n// Accommodate announcement banner\n//\n.announcement\n background-color: var(--color-announcement-background)\n color: var(--color-announcement-text)\n\n height: var(--header-height)\n display: flex\n align-items: center\n overflow-x: auto\n & + .page\n min-height: calc(100% - var(--header-height))\n\n.announcement-content\n box-sizing: border-box\n padding: 0.5rem\n min-width: 100%\n white-space: nowrap\n text-align: center\n\n a\n color: 
var(--color-announcement-text)\n text-decoration-color: var(--color-announcement-text)\n\n &:hover\n color: var(--color-announcement-text)\n text-decoration-color: var(--color-link--hover)\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for theme\n////////////////////////////////////////////////////////////////////////////////\n.no-js .theme-toggle-container // don't show theme toggle if there's no JS\n display: none\n\n.theme-toggle-container\n vertical-align: middle\n\n.theme-toggle\n cursor: pointer\n border: none\n padding: 0\n background: transparent\n\n.theme-toggle svg\n vertical-align: middle\n height: 1rem\n width: 1rem\n color: var(--color-foreground-primary)\n display: none\n\n.theme-toggle-header\n float: left\n padding: 1rem 0.5rem\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for elements\n////////////////////////////////////////////////////////////////////////////////\n.toc-overlay-icon, .nav-overlay-icon\n display: none\n cursor: pointer\n\n .icon\n color: var(--color-foreground-secondary)\n height: 1rem\n width: 1rem\n\n.toc-header-icon, .nav-overlay-icon\n // for when we set display: flex\n justify-content: center\n align-items: center\n\n.toc-content-icon\n height: 1.5rem\n width: 1.5rem\n\n.content-icon-container\n float: right\n display: flex\n margin-top: 1.5rem\n margin-left: 1rem\n margin-bottom: 1rem\n gap: 0.5rem\n\n .edit-this-page svg\n color: inherit\n height: 1rem\n width: 1rem\n\n.sidebar-toggle\n position: absolute\n display: none\n// \n.sidebar-toggle[name=\"__toc\"]\n left: 20px\n.sidebar-toggle:checked\n left: 40px\n// \n\n.overlay\n position: fixed\n top: 0\n width: 0\n height: 0\n\n transition: width 0ms, height 0ms, opacity 250ms ease-out\n\n opacity: 0\n background-color: rgba(0, 0, 0, 0.54)\n.sidebar-overlay\n z-index: 20\n.toc-overlay\n z-index: 40\n\n// Keep things on top and smooth.\n.sidebar-drawer\n z-index: 30\n transition: left 250ms ease-in-out\n.toc-drawer\n z-index: 50\n transition: right 250ms ease-in-out\n\n// Show the Sidebar\n#__navigation:checked\n & ~ .sidebar-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .sidebar-drawer\n top: 0\n left: 0\n // Show the toc sidebar\n#__toc:checked\n & ~ .toc-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .toc-drawer\n top: 0\n right: 0\n\n////////////////////////////////////////////////////////////////////////////////\n// Back to top\n////////////////////////////////////////////////////////////////////////////////\n.back-to-top\n text-decoration: none\n\n display: none\n position: fixed\n left: 0\n top: 1rem\n padding: 0.5rem\n padding-right: 0.75rem\n border-radius: 1rem\n font-size: 0.8125rem\n\n background: var(--color-background-primary)\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), #6b728080 0px 0px 1px 0px\n\n z-index: 10\n\n margin-left: 50%\n transform: translateX(-50%)\n svg\n height: 1rem\n width: 1rem\n fill: currentColor\n display: inline-block\n\n span\n margin-left: 0.25rem\n\n .show-back-to-top &\n display: flex\n align-items: center\n\n////////////////////////////////////////////////////////////////////////////////\n// Responsive layouting\n////////////////////////////////////////////////////////////////////////////////\n// Make things a bit bigger on bigger screens.\n@media (min-width: $full-width + $sidebar-width)\n html\n font-size: 110%\n\n@media (max-width: $full-width)\n // Collapse \"toc\" into the icon.\n .toc-content-icon\n display: flex\n 
.toc-drawer\n position: fixed\n height: 100vh\n top: 0\n right: -$sidebar-width\n border-left: 1px solid var(--color-background-muted)\n .toc-tree\n border-left: none\n font-size: var(--toc-font-size--mobile)\n\n // Accomodate for a changed content width.\n .sidebar-drawer\n width: calc((100% - #{$full-width - $sidebar-width}) / 2 + #{$sidebar-width})\n\n@media (max-width: $full-width - $sidebar-width)\n // Collapse \"navigation\".\n .nav-overlay-icon\n display: flex\n .sidebar-drawer\n position: fixed\n height: 100vh\n width: $sidebar-width\n\n top: 0\n left: -$sidebar-width\n\n // Swap which icon is visible.\n .toc-header-icon\n display: flex\n .toc-content-icon, .theme-toggle-content\n display: none\n .theme-toggle-header\n display: block\n\n // Show the header.\n .mobile-header\n position: sticky\n top: 0\n display: flex\n justify-content: space-between\n align-items: center\n\n .header-left,\n .header-right\n display: flex\n height: var(--header-height)\n padding: 0 var(--header-padding)\n label\n height: 100%\n width: 100%\n user-select: none\n\n .nav-overlay-icon .icon,\n .theme-toggle svg\n height: 1.25rem\n width: 1.25rem\n\n // Add a scroll margin for the content\n :target\n scroll-margin-top: var(--header-height)\n\n // Show back-to-top below the header\n .back-to-top\n top: calc(var(--header-height) + 0.5rem)\n\n // Center the page, and accommodate for the header.\n .page\n flex-direction: column\n justify-content: center\n .content\n margin-left: auto\n margin-right: auto\n\n@media (max-width: $content-width + 2* $content-padding)\n // Content should respect window limits.\n .content\n width: 100%\n overflow-x: auto\n\n@media (max-width: $content-width)\n .content\n padding: 0 $content-padding--small\n // Don't float sidebars to the right.\n article aside.sidebar\n float: none\n width: 100%\n margin: 1rem 0\n","//\n// The design here is strongly inspired by mkdocs-material.\n.admonition, .topic\n margin: 1rem auto\n padding: 0 0.5rem 0.5rem 0.5rem\n\n background: var(--color-admonition-background)\n\n border-radius: 0.2rem\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n font-size: var(--admonition-font-size)\n\n overflow: hidden\n page-break-inside: avoid\n\n // First element should have no margin, since the title has it.\n > :nth-child(2)\n margin-top: 0\n\n // Last item should have no margin, since we'll control that w/ padding\n > :last-child\n margin-bottom: 0\n\n.admonition p.admonition-title,\np.topic-title\n position: relative\n margin: 0 -0.5rem 0.5rem\n padding-left: 2rem\n padding-right: .5rem\n padding-top: .4rem\n padding-bottom: .4rem\n\n font-weight: 500\n font-size: var(--admonition-title-font-size)\n line-height: 1.3\n\n // Our fancy icon\n &::before\n content: \"\"\n position: absolute\n left: 0.5rem\n width: 1rem\n height: 1rem\n\n// Default styles\np.admonition-title\n background-color: var(--color-admonition-title-background)\n &::before\n background-color: var(--color-admonition-title)\n mask-image: var(--icon-admonition-default)\n mask-repeat: no-repeat\n\np.topic-title\n background-color: var(--color-topic-title-background)\n &::before\n background-color: var(--color-topic-title)\n mask-image: var(--icon-topic-default)\n mask-repeat: no-repeat\n\n//\n// Variants\n//\n.admonition\n border-left: 0.2rem solid var(--color-admonition-title)\n\n @each $type, $value in $admonitions\n &.#{$type}\n border-left-color: var(--color-admonition-title--#{$type})\n > .admonition-title\n background-color: 
var(--color-admonition-title-background--#{$type})\n &::before\n background-color: var(--color-admonition-title--#{$type})\n mask-image: var(--icon-#{nth($value, 2)})\n\n.admonition-todo > .admonition-title\n text-transform: uppercase\n","// This file stylizes the API documentation (stuff generated by autodoc). It's\n// deeply nested due to how autodoc structures the HTML without enough classes\n// to select the relevant items.\n\n// API docs!\ndl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)\n // Tweak the spacing of all the things!\n dd\n margin-left: 2rem\n > :first-child\n margin-top: 0.125rem\n > :last-child\n margin-bottom: 0.75rem\n\n // This is used for the arguments\n .field-list\n margin-bottom: 0.75rem\n\n // \"Headings\" (like \"Parameters\" and \"Return\")\n > dt\n text-transform: uppercase\n font-size: var(--font-size--small)\n\n dd:empty\n margin-bottom: 0.5rem\n dd > ul\n margin-left: -1.2rem\n > li\n > p:nth-child(2)\n margin-top: 0\n // When the last-empty-paragraph follows a paragraph, it doesn't need\n // to augument the existing spacing.\n > p + p:last-child:empty\n margin-top: 0\n margin-bottom: 0\n\n // Colorize the elements\n > dt\n color: var(--color-api-overall)\n\n.sig:not(.sig-inline)\n font-weight: bold\n\n font-size: var(--api-font-size)\n font-family: var(--font-stack--monospace)\n\n margin-left: -0.25rem\n margin-right: -0.25rem\n padding-top: 0.25rem\n padding-bottom: 0.25rem\n padding-right: 0.5rem\n\n // These are intentionally em, to properly match the font size.\n padding-left: 3em\n text-indent: -2.5em\n\n border-radius: 0.25rem\n\n background: var(--color-api-background)\n transition: background 100ms ease-out\n\n &:hover\n background: var(--color-api-background-hover)\n\n // adjust the size of the [source] link on the right.\n a.reference\n .viewcode-link\n font-weight: normal\n width: 3.5rem\n\nem.property\n font-style: normal\n &:first-child\n color: var(--color-api-keyword)\n.sig-name\n color: var(--color-api-name)\n.sig-prename\n font-weight: normal\n color: var(--color-api-pre-name)\n.sig-paren\n color: var(--color-api-paren)\n.sig-param\n font-style: normal\n\n.versionmodified\n font-style: italic\ndiv.versionadded, div.versionchanged, div.deprecated\n p\n margin-top: 0.125rem\n margin-bottom: 0.125rem\n\n// Align the [docs] and [source] to the right.\n.viewcode-link, .viewcode-back\n float: right\n text-align: right\n",".line-block\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n .line-block\n margin-top: 0rem\n margin-bottom: 0rem\n padding-left: 1rem\n","// Captions\narticle p.caption,\ntable > caption,\n.code-block-caption\n font-size: var(--font-size--small)\n text-align: center\n\n// Caption above a TOCTree\n.toctree-wrapper.compound\n .caption, :not(.caption) > .caption-text\n font-size: var(--font-size--small)\n text-transform: uppercase\n\n text-align: initial\n margin-bottom: 0\n\n > ul\n margin-top: 0\n margin-bottom: 0\n","// Inline code\ncode.literal, .sig-inline\n background: var(--color-inline-code-background)\n border-radius: 0.2em\n // Make the font smaller, and use padding to recover.\n font-size: var(--font-size--small--2)\n padding: 0.1em 0.2em\n\n pre.literal-block &\n font-size: inherit\n padding: 0\n\n p &\n border: 1px solid var(--color-background-border)\n\n.sig-inline\n font-family: var(--font-stack--monospace)\n\n// Code and Literal Blocks\n$code-spacing-vertical: 0.625rem\n$code-spacing-horizontal: 0.875rem\n\n// Wraps every literal block + line numbers.\ndiv[class*=\" 
highlight-\"],\ndiv[class^=\"highlight-\"]\n margin: 1em 0\n display: flex\n\n .table-wrapper\n margin: 0\n padding: 0\n\npre\n margin: 0\n padding: 0\n overflow: auto\n\n // Needed to have more specificity than pygments' \"pre\" selector. :(\n article[role=\"main\"] .highlight &\n line-height: 1.5\n\n &.literal-block,\n .highlight &\n font-size: var(--code-font-size)\n padding: $code-spacing-vertical $code-spacing-horizontal\n\n // Make it look like all the other blocks.\n &.literal-block\n margin-top: 1rem\n margin-bottom: 1rem\n\n border-radius: 0.2rem\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n\n// All code is always contained in this.\n.highlight\n width: 100%\n border-radius: 0.2rem\n\n // Make line numbers and prompts un-selectable.\n .gp, span.linenos\n user-select: none\n pointer-events: none\n\n // Expand the line-highlighting.\n .hll\n display: block\n margin-left: -$code-spacing-horizontal\n margin-right: -$code-spacing-horizontal\n padding-left: $code-spacing-horizontal\n padding-right: $code-spacing-horizontal\n\n/* Make code block captions be nicely integrated */\n.code-block-caption\n display: flex\n padding: $code-spacing-vertical $code-spacing-horizontal\n\n border-radius: 0.25rem\n border-bottom-left-radius: 0\n border-bottom-right-radius: 0\n font-weight: 300\n border-bottom: 1px solid\n\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n border-color: var(--color-background-border)\n\n + div[class]\n margin-top: 0\n pre\n border-top-left-radius: 0\n border-top-right-radius: 0\n\n// When `html_codeblock_linenos_style` is table.\n.highlighttable\n width: 100%\n display: block\n tbody\n display: block\n\n tr\n display: flex\n\n // Line numbers\n td.linenos\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n padding: $code-spacing-vertical $code-spacing-horizontal\n padding-right: 0\n border-top-left-radius: 0.2rem\n border-bottom-left-radius: 0.2rem\n\n .linenodiv\n padding-right: $code-spacing-horizontal\n font-size: var(--code-font-size)\n box-shadow: -0.0625rem 0 var(--color-foreground-border) inset\n\n // Actual code\n td.code\n padding: 0\n display: block\n flex: 1\n overflow: hidden\n\n .highlight\n border-top-left-radius: 0\n border-bottom-left-radius: 0\n\n// When `html_codeblock_linenos_style` is inline.\n.highlight\n span.linenos\n display: inline-block\n padding-left: 0\n padding-right: $code-spacing-horizontal\n margin-right: $code-spacing-horizontal\n box-shadow: -0.0625rem 0 var(--color-foreground-border) inset\n","// Inline Footnote Reference\n.footnote-reference\n font-size: var(--font-size--small--4)\n vertical-align: super\n\n// Definition list, listing the content of each note.\n// docutils <= 0.17\ndl.footnote.brackets\n font-size: var(--font-size--small)\n color: var(--color-foreground-secondary)\n\n display: grid\n grid-template-columns: max-content auto\n dt\n margin: 0\n > .fn-backref\n margin-left: 0.25rem\n\n &:after\n content: \":\"\n\n .brackets\n &:before\n content: \"[\"\n &:after\n content: \"]\"\n\n dd\n margin: 0\n padding: 0 1rem\n\n// docutils >= 0.18\naside.footnote\n font-size: var(--font-size--small)\n color: var(--color-foreground-secondary)\n\naside.footnote > span,\ndiv.citation > span\n float: left\n font-weight: 500\n padding-right: 0.25rem\n\naside.footnote > p,\ndiv.citation > p\n margin-left: 2rem\n","//\n// Figures\n//\nimg\n box-sizing: border-box\n max-width: 100%\n height: auto\n\narticle\n figure, 
.figure\n border-radius: 0.2rem\n\n margin: 0\n :last-child\n margin-bottom: 0\n\n .align-left\n float: left\n clear: left\n margin: 0 1rem 1rem\n\n .align-right\n float: right\n clear: right\n margin: 0 1rem 1rem\n\n .align-default,\n .align-center\n display: block\n text-align: center\n margin-left: auto\n margin-right: auto\n\n // WELL, table needs to be stylised like a table.\n table.align-default\n display: table\n text-align: initial\n",".genindex-jumpbox, .domainindex-jumpbox\n border-top: 1px solid var(--color-background-border)\n border-bottom: 1px solid var(--color-background-border)\n padding: 0.25rem\n\n.genindex-section, .domainindex-section\n h2\n margin-top: 0.75rem\n margin-bottom: 0.5rem\n ul\n margin-top: 0\n margin-bottom: 0\n","ul,\nol\n padding-left: 1.2rem\n\n // Space lists out like paragraphs\n margin-top: 1rem\n margin-bottom: 1rem\n // reduce margins within li.\n li\n > p:first-child\n margin-top: 0.25rem\n margin-bottom: 0.25rem\n\n > p:last-child\n margin-top: 0.25rem\n\n > ul,\n > ol\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n\nol\n &.arabic\n list-style: decimal\n &.loweralpha\n list-style: lower-alpha\n &.upperalpha\n list-style: upper-alpha\n &.lowerroman\n list-style: lower-roman\n &.upperroman\n list-style: upper-roman\n\n// Don't space lists out when they're \"simple\" or in a `.. toctree::`\n.simple,\n.toctree-wrapper\n li\n > ul,\n > ol\n margin-top: 0\n margin-bottom: 0\n\n// Definition Lists\n.field-list,\n.option-list,\ndl:not([class]),\ndl.simple,\ndl.footnote,\ndl.glossary\n dt\n font-weight: 500\n margin-top: 0.25rem\n + dt\n margin-top: 0\n\n .classifier::before\n content: \":\"\n margin-left: 0.2rem\n margin-right: 0.2rem\n\n dd\n > p:first-child,\n ul\n margin-top: 0.125rem\n\n ul\n margin-bottom: 0.125rem\n",".math-wrapper\n width: 100%\n overflow-x: auto\n\ndiv.math\n position: relative\n text-align: center\n\n .headerlink,\n &:focus .headerlink\n display: none\n\n &:hover .headerlink\n display: inline-block\n\n span.eqno\n position: absolute\n right: 0.5rem\n top: 50%\n transform: translate(0, -50%)\n z-index: 1\n","// Abbreviations\nabbr[title]\n cursor: help\n\n// \"Problematic\" content, as identified by Sphinx\n.problematic\n color: var(--color-problematic)\n\n// Keyboard / Mouse \"instructions\"\nkbd:not(.compound)\n margin: 0 0.2rem\n padding: 0 0.2rem\n border-radius: 0.2rem\n border: 1px solid var(--color-foreground-border)\n color: var(--color-foreground-primary)\n vertical-align: text-bottom\n\n font-size: var(--font-size--small--3)\n display: inline-block\n\n box-shadow: 0 0.0625rem 0 rgba(0, 0, 0, 0.2), inset 0 0 0 0.125rem var(--color-background-primary)\n\n background-color: var(--color-background-secondary)\n\n// Blockquote\nblockquote\n border-left: 4px solid var(--color-background-border)\n background: var(--color-background-secondary)\n\n margin-left: 0\n margin-right: 0\n padding: 0.5rem 1rem\n\n .attribution\n font-weight: 600\n text-align: right\n\n &.pull-quote,\n &.highlights\n font-size: 1.25em\n\n &.epigraph,\n &.pull-quote\n border-left-width: 0\n border-radius: 0.5rem\n\n &.highlights\n border-left-width: 0\n background: transparent\n\n// Center align embedded-in-text images\np .reference img\n vertical-align: middle\n","p.rubric\n line-height: 1.25\n font-weight: bold\n font-size: 1.125em\n\n // For Numpy-style documentation that's got rubrics within it.\n // https://github.com/pradyunsg/furo/discussions/505\n dd &\n line-height: inherit\n font-weight: inherit\n\n font-size: var(--font-size--small)\n 
text-transform: uppercase\n","article .sidebar\n float: right\n clear: right\n width: 30%\n\n margin-left: 1rem\n margin-right: 0\n\n border-radius: 0.2rem\n background-color: var(--color-background-secondary)\n border: var(--color-background-border) 1px solid\n\n > *\n padding-left: 1rem\n padding-right: 1rem\n\n > ul, > ol // lists need additional padding, because bullets.\n padding-left: 2.2rem\n\n .sidebar-title\n margin: 0\n padding: 0.5rem 1rem\n border-bottom: var(--color-background-border) 1px solid\n\n font-weight: 500\n\n// TODO: subtitle\n// TODO: dedicated variables?\n",".table-wrapper\n width: 100%\n overflow-x: auto\n margin-top: 1rem\n margin-bottom: 0.5rem\n padding: 0.2rem 0.2rem 0.75rem\n\ntable.docutils\n border-radius: 0.2rem\n border-spacing: 0\n border-collapse: collapse\n\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n th\n background: var(--color-table-header-background)\n\n td,\n th\n // Space things out properly\n padding: 0 0.25rem\n\n // Get the borders looking just-right.\n border-left: 1px solid var(--color-table-border)\n border-right: 1px solid var(--color-table-border)\n border-bottom: 1px solid var(--color-table-border)\n\n p\n margin: 0.25rem\n\n &:first-child\n border-left: none\n &:last-child\n border-right: none\n\n // MyST-parser tables set these classes for control of column alignment\n &.text-left\n text-align: left\n &.text-right\n text-align: right\n &.text-center\n text-align: center\n",":target\n scroll-margin-top: 0.5rem\n\n@media (max-width: $full-width - $sidebar-width)\n :target\n scroll-margin-top: calc(0.5rem + var(--header-height))\n\n // When a heading is selected\n section > span:target\n scroll-margin-top: calc(0.8rem + var(--header-height))\n\n// Permalinks\n.headerlink\n font-weight: 100\n user-select: none\n\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\ndl dt,\np.caption,\nfigcaption p,\ntable > caption,\n.code-block-caption\n > .headerlink\n margin-left: 0.5rem\n visibility: hidden\n &:hover > .headerlink\n visibility: visible\n\n // Don't change to link-like, if someone adds the contents directive.\n > .toc-backref\n color: inherit\n text-decoration-line: none\n\n// Figure and table captions are special.\nfigure:hover > figcaption > p > .headerlink,\ntable:hover > caption > .headerlink\n visibility: visible\n\n:target >, // Regular section[id] style anchors\nspan:target ~ // Non-regular span[id] style \"extra\" anchors\n h1,\n h2,\n h3,\n h4,\n h5,\n h6\n &:nth-of-type(1)\n background-color: var(--color-highlight-on-target)\n // .headerlink\n // visibility: visible\n code.literal\n background-color: transparent\n\ntable:target > caption,\nfigure:target\n background-color: var(--color-highlight-on-target)\n\n// Inline page contents\n.this-will-duplicate-information-and-it-is-still-useful-here li :target\n background-color: var(--color-highlight-on-target)\n\n// Code block permalinks\n.literal-block-wrapper:target .code-block-caption\n background-color: var(--color-highlight-on-target)\n\n// When a definition list item is selected\n//\n// There isn't really an alternative to !important here, due to the\n// high-specificity of API documentation's selector.\ndt:target\n background-color: var(--color-highlight-on-target) !important\n\n// When a footnote reference is selected\n.footnote > dt:target + dd,\n.footnote-reference:target\n background-color: var(--color-highlight-on-target)\n",".guilabel\n background-color: var(--color-guilabel-background)\n border: 1px solid var(--color-guilabel-border)\n color: 
var(--color-guilabel-text)\n\n padding: 0 0.3em\n border-radius: 0.5em\n font-size: 0.9em\n","// This file contains the styles used for stylizing the footer that's shown\n// below the content.\n\nfooter\n font-size: var(--font-size--small)\n display: flex\n flex-direction: column\n\n margin-top: 2rem\n\n// Bottom of page information\n.bottom-of-page\n display: flex\n align-items: center\n justify-content: space-between\n\n margin-top: 1rem\n padding-top: 1rem\n padding-bottom: 1rem\n\n color: var(--color-foreground-secondary)\n border-top: 1px solid var(--color-background-border)\n\n line-height: 1.5\n\n @media (max-width: $content-width)\n text-align: center\n flex-direction: column-reverse\n gap: 0.25rem\n\n .left-details\n font-size: var(--font-size--small)\n\n .right-details\n display: flex\n flex-direction: column\n gap: 0.25rem\n text-align: right\n\n .icons\n display: flex\n justify-content: flex-end\n gap: 0.25rem\n font-size: 1rem\n\n a\n text-decoration: none\n\n svg,\n img\n font-size: 1.125rem\n height: 1em\n width: 1em\n\n// Next/Prev page information\n.related-pages\n a\n display: flex\n align-items: center\n\n text-decoration: none\n &:hover .page-info .title\n text-decoration: underline\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n svg.furo-related-icon,\n svg.furo-related-icon > use\n flex-shrink: 0\n\n color: var(--color-foreground-border)\n\n width: 0.75rem\n height: 0.75rem\n margin: 0 0.5rem\n\n &.next-page\n max-width: 50%\n\n float: right\n clear: right\n text-align: right\n\n &.prev-page\n max-width: 50%\n\n float: left\n clear: left\n\n svg\n transform: rotate(180deg)\n\n.page-info\n display: flex\n flex-direction: column\n overflow-wrap: anywhere\n\n .next-page &\n align-items: flex-end\n\n .context\n display: flex\n align-items: center\n\n padding-bottom: 0.1rem\n\n color: var(--color-foreground-muted)\n font-size: var(--font-size--small)\n text-decoration: none\n","// This file contains the styles for the contents of the left sidebar, which\n// contains the navigation tree, logo, search etc.\n\n////////////////////////////////////////////////////////////////////////////////\n// Brand on top of the scrollable tree.\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-brand\n display: flex\n flex-direction: column\n flex-shrink: 0\n\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n text-decoration: none\n\n.sidebar-brand-text\n color: var(--color-sidebar-brand-text)\n overflow-wrap: break-word\n margin: var(--sidebar-item-spacing-vertical) 0\n font-size: 1.5rem\n\n.sidebar-logo-container\n margin: var(--sidebar-item-spacing-vertical) 0\n\n.sidebar-logo\n margin: 0 auto\n display: block\n max-width: 100%\n\n////////////////////////////////////////////////////////////////////////////////\n// Search\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-search-container\n display: flex\n align-items: center\n margin-top: var(--sidebar-search-space-above)\n\n position: relative\n\n background: var(--color-sidebar-search-background)\n &:hover,\n &:focus-within\n background: var(--color-sidebar-search-background--focus)\n\n &::before\n content: \"\"\n position: absolute\n left: var(--sidebar-item-spacing-horizontal)\n width: var(--sidebar-search-icon-size)\n height: var(--sidebar-search-icon-size)\n\n background-color: var(--color-sidebar-search-icon)\n mask-image: var(--icon-search)\n\n.sidebar-search\n box-sizing: 
border-box\n\n border: none\n border-top: 1px solid var(--color-sidebar-search-border)\n border-bottom: 1px solid var(--color-sidebar-search-border)\n\n padding-top: var(--sidebar-search-input-spacing-vertical)\n padding-bottom: var(--sidebar-search-input-spacing-vertical)\n padding-right: var(--sidebar-search-input-spacing-horizontal)\n padding-left: calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size))\n\n width: 100%\n\n color: var(--color-sidebar-search-foreground)\n background: transparent\n z-index: 10\n\n &:focus\n outline: none\n\n &::placeholder\n font-size: var(--sidebar-search-input-font-size)\n\n//\n// Hide Search Matches link\n//\n#searchbox .highlight-link\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0\n margin: 0\n text-align: center\n\n a\n color: var(--color-sidebar-search-icon)\n font-size: var(--font-size--small--2)\n\n////////////////////////////////////////////////////////////////////////////////\n// Structure/Skeleton of the navigation tree (left)\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-tree\n font-size: var(--sidebar-item-font-size)\n margin-top: var(--sidebar-tree-space-above)\n margin-bottom: var(--sidebar-item-spacing-vertical)\n\n ul\n padding: 0\n margin-top: 0\n margin-bottom: 0\n\n display: flex\n flex-direction: column\n\n list-style: none\n\n li\n position: relative\n margin: 0\n\n > ul\n margin-left: var(--sidebar-item-spacing-horizontal)\n\n .icon\n color: var(--color-sidebar-link-text)\n\n .reference\n box-sizing: border-box\n color: var(--color-sidebar-link-text)\n\n // Fill the parent.\n display: inline-block\n line-height: var(--sidebar-item-line-height)\n text-decoration: none\n\n // Don't allow long words to cause wrapping.\n overflow-wrap: anywhere\n\n height: 100%\n width: 100%\n\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n\n &:hover\n background: var(--color-sidebar-item-background--hover)\n\n // Add a nice little \"external-link\" arrow here.\n &.external::after\n content: url('data:image/svg+xml,')\n margin: 0 0.25rem\n vertical-align: middle\n color: var(--color-sidebar-link-text)\n\n // Make the current page reference bold.\n .current-page > .reference\n font-weight: bold\n\n label\n position: absolute\n top: 0\n right: 0\n height: var(--sidebar-item-height)\n width: var(--sidebar-expander-width)\n\n cursor: pointer\n user-select: none\n\n display: flex\n justify-content: center\n align-items: center\n\n .caption, :not(.caption) > .caption-text\n font-size: var(--sidebar-caption-font-size)\n color: var(--color-sidebar-caption-text)\n\n font-weight: bold\n text-transform: uppercase\n\n margin: var(--sidebar-caption-space-above) 0 0 0\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n\n // If it has children, add a bit more padding to wrap the content to avoid\n // overlapping with the