diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/404.html b/404.html
new file mode 100644
index 00000000..aca63577
--- /dev/null
+++ b/404.html
@@ -0,0 +1,1621 @@
[ … 1,621 added lines of generated HTML for the site's 404 page (Material for MkDocs scaffold: header, search, navigation sidebar, footer). The markup was stripped during extraction; the recoverable text is the page title "NVIDIA Jetson AI Lab" and the body message "404 - Not found". … ]
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 00000000..0713cc29 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +www.jetson-ai-lab.com diff --git a/assets/80s-pop.wav b/assets/80s-pop.wav new file mode 100644 index 00000000..30606cb7 Binary files /dev/null and b/assets/80s-pop.wav differ diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.081f42fc.min.js b/assets/javascripts/bundle.081f42fc.min.js new file mode 100644 index 00000000..32734cd3 --- /dev/null +++ b/assets/javascripts/bundle.081f42fc.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function 
te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var O=f()(_);return u("cut"),O},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",O=document.createElement("textarea");O.style.fontSize="12pt",O.style.border="0",O.style.padding="0",O.style.margin="0",O.style.position="absolute",O.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return O.style.top="".concat(j,"px"),O.setAttribute("readonly",""),O.value=V,O}var te=function(_,O){var j=A(_);O.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var O=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,O):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,O):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(O){return typeof O}:H=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},O=_.action,j=O===void 0?"copy":O,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(O){return typeof O}:Ie=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var O=0;O<_.length;O++){var j=_[O];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,O){return _&&ro(V.prototype,_),O&&ro(V,O),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(O){return O.__proto__||Object.getPrototypeOf(O)},Wt(V)}function vr(V,_){var O="data-clipboard-".concat(V);if(_.hasAttribute(O))return _.getAttribute(O)}var Ri=function(V){Ci(O,V);var _=Hi(O);function O(j,D){var Y;return _i(this,O),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(O,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof 
D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),O}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var 
r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 
0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var M=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?v(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return M}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),B(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return 
C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(b(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),b(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),b(r=>Go.pipe(v(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(b(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),b(t=>en.pipe(v(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(v(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),v(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(b(t=>t?M:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(G("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),v(t=>t.length>0),B(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),v(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return 
S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(b(r=>r?t():M))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(b(r=>r.text()),m(r=>JSON.parse(r)),B(1))}function ln(e,t){let r=new DOMParser;return zr(e,t).pipe(b(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),B(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(b(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),B(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),B(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function G(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!G("announce.dismiss")||!e.childElementCount)return M;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);G("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),b(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),b(c=>c?r:M),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(b(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(v(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else 
return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(v(c=>c),ee(s,(c,l)=>l),v(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(v(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(v(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(b(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(v(({active:s})=>s)),i.pipe(_e(250),v(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),v(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),v(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of 
Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?M:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):M})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||G("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),G("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||G("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),b(f=>f?l:M)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return G("content.lazy")?tt(e).pipe(v(n=>n),Te(1),b(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return S(t.pipe(m(n=>n.closest("details:not([open])")),v(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(v(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup 
rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),B(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),v(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),v(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return G("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),tt(e).pipe(b(()=>Na(n)),E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>G("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(b(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return M;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(v(({active:a})=>a)),i.pipe(_e(250),v(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!G("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),b(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),B(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let 
o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(v(()=>G("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?M:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(b(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),B(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return M;let r=e.target.closest("a");if(r===null)return M;if(r.target||e.metaKey||e.ctrlKey)return M;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):M}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...G("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(b(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),M}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return M;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),b(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),b(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),M)))),b(Xn),b(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(b(()=>e),Z("pathname"),b(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),b(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return 
e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),b(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:G("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>M)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),b(n=>d(document.body,"click").pipe(v(i=>!i.metaKey&&!i.ctrlKey),ee(o),b(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?M:(i.preventDefault(),I(p))}}return M}),b(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(b(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),B(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),B(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(v(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),b(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),b(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?M:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(v(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(v(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(v(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(v(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(v(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(v(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),v(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>M),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>M),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>M),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return M}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return M}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>M),v(t=>Object.keys(t).length>0),m(t=>({facts:t})),B(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(b(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),b(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),b(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c<u||w)l=[...l,f.shift()];else break}for(;l.length;){let[,A]=l[l.length-1];if(A-c>=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length<p.prev.length?{prev:p.prev.slice(Math.max(0,s.prev.length-1),p.prev.length),next:[]}:{prev:p.prev.slice(-1),next:p.next.slice(0,p.next.length-s.next.length)}))}function vi(e,{viewport$:t,header$:r,main$:o,target$:n}){return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),G("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(v(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let 
u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return G("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(b(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),v(o=>o),m(()=>r),Te(1))),v(r=>r.offsetWidth<r.scrollWidth),oe(r=>{let o=r.innerText,n=r.closest("a")||r;return n.title=o,lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title")))})).subscribe(),e.pipe(b(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(b(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(b(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),v(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),b(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new 
URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),B(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;G("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(v(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),b(e=>Qn(e,{viewport$:Oe,header$:rt})),B(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>G("search.highlight")?mi(e,{index$:Mi,location$:jt}):M),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(b(()=>hs),Pe(ds),B(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.081f42fc.min.js.map + diff --git a/assets/javascripts/bundle.081f42fc.min.js.map b/assets/javascripts/bundle.081f42fc.min.js.map new file mode 100644 index 00000000..e055db5a --- /dev/null +++ b/assets/javascripts/bundle.081f42fc.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", 
"node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", 
"node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", 
"src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
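// A minimal usage sketch of the bundled clipboard.js `Clipboard` class
// above (the `.copy-btn` selector and `#code` target are hypothetical):
//
//   <button class="copy-btn" data-clipboard-target="#code">Copy</button>
import ClipboardJS from "clipboard";

if (ClipboardJS.isSupported()) {            // probes queryCommandSupported
  const clipboard = new ClipboardJS(".copy-btn");
  clipboard.on("success", e => {
    console.log("copied:", e.text);
    e.clearSelection();                     // refocuses trigger, clears range
  });
  clipboard.on("error", e => console.warn("copy failed for", e.trigger));
}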
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
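// `listen` above (published as the good-listener package) accepts an
// HTMLElement, a NodeList, or a CSS selector and always hands back an
// object with a destroy() method; selector targets are delegated from
// document.body via the `delegate`/`closest` modules above. A sketch
// (the selector is hypothetical, and the package ships no type defs):
import listen from "good-listener";

const listener = listen(".js-button", "click", e => {
  // for delegated listeners, e.delegateTarget is the matched element
  console.log("clicked", e.delegateTarget);
});

listener.destroy(); // removes the underlying listener(s)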
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
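// Module 279 above is the tiny-emitter package; its whole surface is
// on/once/emit/off, and every method returns the emitter for chaining
// (the source exports both a default and a named TinyEmitter). A sketch:
import { TinyEmitter } from "tiny-emitter";

const bus = new TinyEmitter();
const onPing = (n: number) => console.log("ping", n);

bus.on("ping", onPing)
   .once("ready", () => console.log("fires at most once"))
   .emit("ping", 1)   // -> "ping 1"
   .emit("ready")     // -> "fires at most once"
   .emit("ready")     // no output: the once-listener removed itself
   .off("ping", onPing);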
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
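// escapeHtml above replaces the five characters matched by /["'&<>]/ with
// their HTML entities (&quot; &#39; &amp; &lt; &gt;), copying the
// unescaped stretches between matches in bulk. A minimal usage sketch:
import escapeHtml from "escape-html";

const html = `<img src="x" onerror='alert(1)'>`;
console.log(escapeHtml(html));
// -> &lt;img src=&quot;x&quot; onerror=&#39;alert(1)&#39;&gt;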
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n 
.map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
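// The bootstrap above attaches its RxJS observables to `window` so extra
// scripts can hook into them; with instant navigation enabled the page is
// never reloaded, so per-page code should re-run on document$ emissions
// rather than on DOMContentLoaded. A sketch for a site-level extra script
// (the Window augmentation is an assumption for TypeScript's benefit):
export {};
declare global {
  interface Window { document$: { subscribe(next: () => void): unknown } }
}

window.document$.subscribe(() => {
  // runs once per navigation, including instant (XHR-based) ones
  console.log("page rendered:", document.title);
});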
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
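// The tslib helpers above are what TypeScript emits for down-level
// targets instead of inlining a copy per file. For instance, this source
// routes its prototype wiring through __extends and its object spread
// through __assign when compiled with --target ES5:
class Animal {
  constructor(public name: string) {}
}
class Dog extends Animal {                 // -> __extends(Dog, Animal)
  bark() { return `${this.name}: woof`; }
}
const defaults = { loud: false };
const opts = { ...defaults, volume: 11 };  // -> __assign at ES5
console.log(new Dog("Rex").bark(), opts);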
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
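// __awaiter and __generator above are the pair that async/await lowers to
// at ES5: __generator drives a resumable state machine over the function
// body, and __awaiter adapts it to a Promise. A function like this one
// compiles down to exactly those two helpers:
async function fetchJson(url: string): Promise<unknown> {
  const res = await fetch(url); // each await is one state transition
  return res.json();
}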
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
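// A minimal sketch of the Subscription contract implemented above:
// finalizers registered with add() run on unsubscribe(), child
// subscriptions detach themselves from their parents, and adding to an
// already closed subscription executes the finalizer immediately.
import { Subscription } from "rxjs";

const parent = new Subscription(() => console.log("initial teardown"));
const child = new Subscription(() => console.log("child teardown"));

parent.add(child);
parent.add(() => console.log("function finalizer"));

parent.unsubscribe();  // logs all three teardowns, initialTeardown first
parent.add(() => console.log("runs immediately"));  // parent is closed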
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
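// A sketch of the GlobalConfig hook described above. The handler is
// invoked asynchronously (via timeoutProvider.setTimeout) so user code in
// it cannot interfere with the subscription path:
import { config, throwError } from "rxjs";

config.onUnhandledError = err => console.warn("unhandled:", err);

// No error callback is supplied here, so the error is routed to the hook
// on a later job instead of being thrown on this call stack:
throwError(() => new Error("boom")).subscribe({ next: () => {} });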
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n        partialObserver = observerOrNext;\n      }\n    }\n\n    // Wrap the partial observer to ensure it's a full observer, and\n    // make sure proper error handling is accounted for.\n    this.destination = new ConsumerObserver(partialObserver);\n  }\n}\n\nfunction handleUnhandledError(error: any) {\n  if (config.useDeprecatedSynchronousErrorHandling) {\n    captureError(error);\n  } else {\n    // Ideal path, we report this as an unhandled error,\n    // which is thrown on a new call stack.\n    reportUnhandledError(error);\n  }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * to the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n  throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n  const { onStoppedNotification } = config;\n  onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n  closed: true,\n  next: noop,\n  error: defaultErrorHandler,\n  complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n *   map(i => range(i)),\n *   mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n *   next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
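// Editorial sketch of what `pipe`/`pipeFromArray` above compose: zero functions
// degenerate to `identity`, otherwise each unary function is applied left to
// right via `reduce`. `double` and `increment` are illustrative names, not
// bundle identifiers.
import { pipe, identity } from 'rxjs';

const double = (n: number) => n * 2;
const increment = (n: number) => n + 1;

const doubleThenIncrement = pipe(double, increment);
console.log(doubleThenIncrement(5));      // 11, i.e. increment(double(5))
console.log(pipe()(42) === identity(42)); // true: an empty pipe is identity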
Will be made internal in v8.\n   */\n  source: Observable | undefined;\n\n  /**\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   */\n  operator: Operator | undefined;\n\n  /**\n   * @constructor\n   * @param {Function} subscribe the function that is called when the Observable is\n   * initially subscribed to. This function is given a Subscriber, to which new values\n   * can be `next`ed, or an `error` method can be called to raise an error, or\n   * `complete` can be called to notify of a successful completion.\n   */\n  constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n    if (subscribe) {\n      this._subscribe = subscribe;\n    }\n  }\n\n  // HACK: Since TypeScript inherits static properties too, we have to\n  // fight against TypeScript here so Subject can have a different static create signature\n  /**\n   * Creates a new Observable by calling the Observable constructor\n   * @owner Observable\n   * @method create\n   * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n   * @return {Observable} a new observable\n   * @nocollapse\n   * @deprecated Use `new Observable()` instead. Will be removed in v8.\n   */\n  static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n    return new Observable(subscribe);\n  };\n\n  /**\n   * Creates a new Observable, with this Observable instance as the source, and the passed\n   * operator defined as the new observable's operator.\n   * @method lift\n   * @param operator the operator defining the operation to take on the observable\n   * @return a new observable with the Operator applied\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   * If you have implemented an operator using `lift`, it is recommended that you create an\n   * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n   * scratch\" section here: https://rxjs.dev/guide/operators\n   */\n  lift(operator?: Operator): Observable {\n    const observable = new Observable();\n    observable.source = this;\n    observable.operator = operator;\n    return observable;\n  }\n\n  subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n  /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n  subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n  /**\n   * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n   *\n   * Use it when you have all these Observables, but still nothing is happening.\n   *\n   * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n   * might be for example a function that you passed to Observable's constructor, but most of the time it is\n   * a library implementation, which defines what will be emitted by an Observable, and when it will be emitted. This means\n   * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as is often\n   * thought.\n   *\n   * Apart from starting the execution of an Observable, this method allows you to listen for values\n   * that an Observable emits, as well as for when it completes or errors. 
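// Editorial sketch of the `lift` deprecation guidance above: build a custom
// operator by returning `new Observable()` directly rather than calling `lift`.
// `multiplyBy` is a hypothetical operator, not part of RxJS.
import { Observable, OperatorFunction } from 'rxjs';

function multiplyBy(factor: number): OperatorFunction<number, number> {
  return (source) =>
    new Observable<number>((subscriber) =>
      // Returning the inner subscription wires up teardown for us.
      source.subscribe({
        next: (value) => subscriber.next(value * factor),
        error: (err) => subscriber.error(err),
        complete: () => subscriber.complete(),
      })
    );
}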
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
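// Editorial sketch of the `forEach` warning above: cap a non-completing source
// (here `interval`) with `take` or `timeout` so the returned promise can
// actually settle. Uses only public `rxjs` exports; names are illustrative.
import { interval, take } from 'rxjs';

async function sumFirstThree(): Promise<number> {
  let total = 0;
  // Without take(3), interval() never completes and this would await forever.
  await interval(100).pipe(take(3)).forEach((v) => (total += v));
  return total; // 0 + 1 + 2 = 3
}

sumFirstThree().then((t) => console.log('total:', t));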
Details: https://rxjs.dev/deprecations/to-promise */\n  toPromise(PromiseCtor: typeof Promise): Promise;\n  /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n  toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n  /* tslint:enable:max-line-length */\n\n  /**\n   * Subscribe to this Observable and get a Promise resolving on\n   * `complete` with the last emission (if any).\n   *\n   * **WARNING**: Only use this with observables you *know* will complete. If the source\n   * observable does not complete, you will end up with a promise that is hung up, and\n   * potentially all of the state of an async function hanging out in memory. To avoid\n   * this situation, look into adding something like {@link timeout}, {@link take},\n   * {@link takeWhile}, or {@link takeUntil} amongst others.\n   *\n   * @method toPromise\n   * @param [promiseCtor] a constructor function used to instantiate\n   * the Promise\n   * @return A Promise that resolves with the last value emitted, or\n   * rejects on an error. If there were no emissions, Promise\n   * resolves with undefined.\n   * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n   */\n  toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n    promiseCtor = getPromiseCtor(promiseCtor);\n\n    return new promiseCtor((resolve, reject) => {\n      let value: T | undefined;\n      this.subscribe(\n        (x: T) => (value = x),\n        (err: any) => reject(err),\n        () => resolve(value)\n      );\n    }) as Promise;\n  }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * a default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to be passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n  return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n  return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n  return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n  return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
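// Editorial sketch of the migration the `toPromise` deprecation notices above
// point to: `firstValueFrom` / `lastValueFrom` (RxJS 7+). Note that
// `lastValueFrom` rejects on an empty source, where `toPromise` resolved
// with undefined.
import { of, firstValueFrom, lastValueFrom } from 'rxjs';

async function main() {
  console.log(await firstValueFrom(of(1, 2, 3))); // 1
  console.log(await lastValueFrom(of(1, 2, 3)));  // 3
}
main();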
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n  init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n  return (source: Observable) => {\n    if (hasLift(source)) {\n      return source.lift(function (this: Subscriber, liftedSource: Observable) {\n        try {\n          return init(liftedSource, this);\n        } catch (err) {\n          this.error(err);\n        }\n      });\n    }\n    throw new TypeError('Unable to lift unknown Observable type');\n  };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and sent to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n  destination: Subscriber,\n  onNext?: (value: T) => void,\n  onComplete?: () => void,\n  onError?: (err: any) => void,\n  onFinalize?: () => void\n): Subscriber {\n  return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n  /**\n   * Creates an instance of an `OperatorSubscriber`.\n   * @param destination The downstream subscriber.\n   * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n   * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n   * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n   * and sent to the `destination` error handler.\n   * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n   * this handler are sent to the `destination` error handler.\n   * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n   * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n   * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n   * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n   * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n   * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
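// Editorial sketch of the Subject semantics above: one `next` call is multicast
// to every current observer, and using a Subject after `unsubscribe()` throws
// the ObjectUnsubscribedError defined above.
import { Subject } from 'rxjs';

const subject = new Subject<number>();
subject.subscribe((v) => console.log('A got', v));
subject.subscribe((v) => console.log('B got', v));

subject.next(1);       // multicast: both A and B log 1
subject.unsubscribe(); // closes the subject itself

try {
  subject.next(2);
} catch (err) {
  console.error((err as Error).name); // 'ObjectUnsubscribedError'
}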
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
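// Editorial sketch of the BehaviorSubject / ReplaySubject(1) comparison in the
// comment above: BehaviorSubject comes primed with a value, while ReplaySubject
// only replays values it has actually observed.
import { BehaviorSubject, ReplaySubject } from 'rxjs';

const behavior$ = new BehaviorSubject<number>(0);
behavior$.subscribe((v) => console.log('behavior:', v)); // logs 0 immediately

const replay$ = new ReplaySubject<number>(1); // buffer of 1, still empty
replay$.subscribe((v) => console.log('replay:', v)); // logs nothing yet

behavior$.next(1); // 'behavior: 1'
replay$.next(1);   // 'replay: 1'

// A late subscriber now receives the buffered value:
replay$.subscribe((v) => console.log('late replay:', v)); // 'late replay: 1'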
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n    // actions via the same code path, without adding API surface area, as well\n    // as mimic traditional recursion but across asynchronous boundaries.\n    //\n    // However, JS runtimes and timers distinguish between intervals achieved by\n    // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n    // serial `setTimeout` calls can be individually delayed, which delays\n    // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n    // guarantee the interval callback will be invoked more precisely to the\n    // interval period, regardless of load.\n    //\n    // Therefore, we use `setInterval` to schedule single and repeat actions.\n    // If the action reschedules itself with the same delay, the interval is not\n    // canceled. If the action doesn't reschedule, or reschedules with a\n    // different delay, the interval will be canceled after scheduled callback\n    // execution.\n    //\n    if (id != null) {\n      this.id = this.recycleAsyncId(scheduler, id, delay);\n    }\n\n    // Set the pending flag indicating that this action has been scheduled, or\n    // has recursively rescheduled itself.\n    this.pending = true;\n\n    this.delay = delay;\n    // If this action already has an async Id, don't request a new one.\n    this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n    return this;\n  }\n\n  protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n    return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n  }\n\n  protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n    // If this action is rescheduled with the same delay time, don't clear the interval id.\n    if (delay != null && this.delay === delay && this.pending === false) {\n      return id;\n    }\n    // Otherwise, if the action's delay time is different from the current delay,\n    // or the action has been rescheduled before it's executed, clear the interval id.\n    if (id != null) {\n      intervalProvider.clearInterval(id);\n    }\n\n    return undefined;\n  }\n\n  /**\n   * Immediately executes this action and the `work` it contains.\n   * @return {any}\n   */\n  public execute(state: T, delay: number): any {\n    if (this.closed) {\n      return new Error('executing a cancelled action');\n    }\n\n    this.pending = false;\n    const error = this._execute(state, delay);\n    if (error) {\n      return error;\n    } else if (this.pending === false && this.id != null) {\n      // Dequeue if the action didn't reschedule itself. Don't call\n      // unsubscribe(), because the action could reschedule later.\n      // For example:\n      // ```\n      // scheduler.schedule(function doWork(counter) {\n      //   /* ... I'm a busy worker bee ... */\n      //   var originalAction = this;\n      //   /* wait 100ms before rescheduling the action */\n      //   setTimeout(function () {\n      //     originalAction.schedule(counter + 1);\n      //   }, 100);\n      // }, 1000);\n      // ```\n      this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n    }\n  }\n\n  protected _execute(state: T, _delay: number): any {\n    let errored: boolean = false;\n    let errorValue: any;\n    try {\n      this.work(state);\n    } catch (e) {\n      errored = true;\n      // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n      // return here, we can't have it return \"\" or 0 or false.\n      // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n      errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n   * some context object, `state`, which will be passed to the `work` function.\n   *\n   * The given arguments will be processed and stored as an Action object in a\n   * queue of actions.\n   *\n   * @param {function(state: ?T): ?Subscription} work A function representing a\n   * task, or some unit of work to be executed by the Scheduler.\n   * @param {number} [delay] Time to wait before executing the work, where the\n   * time unit is implicit and defined by the Scheduler itself.\n   * @param {T} [state] Some contextual data that the `work` function uses when\n   * called by the Scheduler.\n   * @return {Subscription} A subscription in order to be able to unsubscribe\n   * the scheduled work.\n   */\n  public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n    return new this.schedulerActionCtor(this, work).schedule(state, delay);\n  }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n  public actions: Array> = [];\n  /**\n   * A flag to indicate whether the Scheduler is currently executing a batch of\n   * queued actions.\n   * @type {boolean}\n   * @internal\n   */\n  public _active: boolean = false;\n  /**\n   * An internal ID used to track the latest asynchronous task such as those\n   * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n   * others.\n   * @type {any}\n   * @internal\n   */\n  public _scheduled: TimerHandle | undefined;\n\n  constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n    super(SchedulerAction, now);\n  }\n\n  public flush(action: AsyncAction): void {\n    const { actions } = this;\n\n    if (this._active) {\n      actions.push(action);\n      return;\n    }\n\n    let error: any;\n    this._active = true;\n\n    do {\n      if ((error = action.execute(action.state, action.delay))) {\n        break;\n      }\n    } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n    this._active = false;\n\n    if (error) {\n      while ((action = actions.shift()!)) {\n        action.unsubscribe();\n      }\n      throw error;\n    }\n  }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
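// Editorial sketch of the Scheduler contract documented above:
// `schedule(work, delay, state)` queues an Action and hands back a Subscription
// that can cancel the work before it runs. Uses the public `asyncScheduler`.
import { asyncScheduler } from 'rxjs';

const sub = asyncScheduler.schedule(
  (state) => console.log('ran with state', state),
  1000, // delay (milliseconds for asyncScheduler)
  42    // state handed to `work`
);

// Unsubscribing inside the delay window means `work` never executes.
setTimeout(() => sub.unsubscribe(), 500);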
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" a task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * a better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n *   console.log(state);\n *   this.schedule(state + 1, 1000); // `this` references currently executing Action,\n *                                   // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n  constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n    super(scheduler, work);\n  }\n\n  public schedule(state?: T, delay: number = 0): Subscription {\n    if (delay > 0) {\n      return super.schedule(state, delay);\n    }\n    this.delay = delay;\n    this.state = state;\n    this.scheduler.flush(this);\n    return this;\n  }\n\n  public execute(state: T, delay: number): any {\n    return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n  }\n\n  protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n    // If delay exists and is greater than 0, or if the delay is null (the\n    // action wasn't rescheduled) but was originally scheduled as an async\n    // action, then recycle as an async action.\n\n    if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n      return super.requestAsyncId(scheduler, id, delay);\n    }\n\n    // Otherwise flush the scheduler starting with this action.\n    scheduler.flush(this);\n\n    // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n    // `TimerHandle`, and generally the return value here isn't really used. So the\n    // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n    // as opposed to refactoring every other instance of `requestAsyncId`.\n    return 0;\n  }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules the given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an + +
+

Small Language Models (SLM)

+ + +

Small language models are generally defined as having fewer than 7B parameters (Llama-7B shown for reference)
+For more data and info about running these models, see the SLM tutorial and MLC container documentation.

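For a concrete sense of how these SLM results are produced, here is a minimal sketch of running one of the benchmarked models through MLC with the jetson-containers tooling (the model name and flags are illustrative examples; check the SLM tutorial for the current ones):

```bash
# Install the container tooling used throughout these benchmarks
git clone https://github.com/dusty-nv/jetson-containers
bash jetson-containers/install.sh

# Chat with a small language model through the MLC backend
# (the model shown is an illustrative example from the SLM tutorial)
jetson-containers run $(autotag nano_llm) \
  python3 -m nano_llm.chat --api=mlc \
    --model princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT
```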
+

Vision Language Models (VLM)

+ + +

This measures the end-to-end pipeline performance for continuous streaming, as with Live Llava.
+For more data and info about running these models, see the NanoVLM tutorial.

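As a rough sketch of what launching that streaming pipeline looks like, patterned on the NanoVLM tutorial (the model name and generation flags below are assumptions and may differ from the current tutorial):

```bash
# Multimodal chat with a small vision-language model (illustrative flags);
# short context and output lengths keep streaming latency low
jetson-containers run $(autotag nano_llm) \
  python3 -m nano_llm.chat --api=mlc \
    --model Efficient-Large-Model/VILA1.5-3b \
    --max-context-len 256 \
    --max-new-tokens 32
```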
+

Vision Transformers (ViT)

+ + +

ViT performance data from [1] [2] [3]

+

Stable Diffusion

+ + +

Riva

+ + +

For running Riva benchmarks, see ASR Performance and TTS Performance.

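For orientation, standing up the Riva server before running those benchmarks follows roughly this quickstart flow (`<version>` is a placeholder; see the Riva documentation for the current release):

```bash
# Download the Riva quickstart for Jetson (arm64) from NGC
ngc registry resource download-version nvidia/riva/riva_quickstart_arm64:<version>
cd riva_quickstart_arm64_v<version>

bash riva_init.sh   # downloads and optimizes the ASR/TTS models
bash riva_start.sh  # starts the Riva server for benchmarking
```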
+

Vector Database

+ + +

For running vector database benchmarks, see the NanoDB container documentation.

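As a minimal sketch of what a NanoDB benchmark setup involves, patterned on the container documentation (the dataset path and port are assumptions):

```bash
# Load an indexed dataset and serve the interactive similarity-search UI
jetson-containers run $(autotag nanodb) \
  python3 -m nanodb \
    --path /data/nanodb/coco/2017 \
    --server --port=7860
```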
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/buy.md.bak b/buy.md.bak new file mode 100644 index 00000000..e4d72ec1 --- /dev/null +++ b/buy.md.bak @@ -0,0 +1,21 @@ +--- +hide: + - navigation + - toc +--- + +# Buy + +Get your Jetson developer kit to try generative AI in your own local environment. + +!!! note + + The availability and the price of Jetson developer kits vary depending on your region.
Please click the "Jetson Store" button to check availability on the next page. + + +| | Product | Action | +|-|-|-| +| |
Jetson AGX Orin 64GB Developer Kit
  • AI Perf: 275 TOPS
  • GPU: NVIDIA Ampere architecture with 2048 NVIDIA CUDA cores and 64 tensor cores
  • CPU: 12-core Arm Cortex-A78AE v8.2 64-bit CPU 3MB L2 + 6MB L3
  • Memory: 64GB 256-bit LPDDR5 | 204.8 GB/s
  • Storage: 64GB eMMC 5.1
| Jetson Store | +| |
Jetson AGX Orin Developer Kit
  • AI Perf: 275 TOPS
  • GPU: NVIDIA Ampere architecture with 2048 NVIDIA CUDA cores and 64 tensor cores
  • CPU: 12-core Arm Cortex-A78AE v8.2 64-bit CPU 3MB L2 + 6MB L3
  • Memory: 32GB 256-bit LPDDR5 | 204.8 GB/s
  • Storage: 64GB eMMC 5.1
| Jetson Store | +| |
Jetson Orin Nano Developer Kit
  • AI Perf: 40 TOPS
  • GPU: 1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores
  • CPU: 6-core Arm® Cortex®-A78AE v8.2 64-bit CPU 1.5MB L2 + 4MB L3
  • Memory: 8GB 128-bit LPDDR5 | 68 GB/s
  • Storage: SD Card Slot & external NVMe via M.2 Key M
| Jetson Store | + diff --git a/community_articles.html b/community_articles.html new file mode 100644 index 00000000..88a39c09 --- /dev/null +++ b/community_articles.html @@ -0,0 +1,2291 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Community Projects - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + + + + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Community Projects

+

Below, you'll find a collection of guides, tutorials, and articles contributed by the community that showcase generative AI on the Jetson platform.

+

Hackster AI-Powered Photorealistic Talking Avatar (4-26-2024)

+

Nurgaliyev Shakhizat creates an interactive talking avatar using ASR, TTS, LLM, and Audio2Face with NVIDIA Omniverse and Unreal Engine:

+ + +

Hackster An Eye for an Item (4-26-2024)

+

Allen Tao builds an indoor robot with Isaac ROS that maps your home and remembers where things are using SLAM and NanoDB!

+ + +

Hackster Escalator People Tracker (4-2-2024)

+

George Profenza & team install a local CV solution for detecting and tracking people in large retail spaces to drive live generative AI graphics:

+ + +

Hackster Edge Style: Fashion Preview at the Edge (4-1-2024)

+

AI-powered app from Andrei Ciobanu shows virtual try-ons with customer images, enhancing retail shopping using Jetson Orin for speed and privacy:

+ + +

Hackster Cooking meals with a local AI assistant on Jetson AGX Orin (4-1-2024)

+

Dimiter Kendri builds a multimodal, fully local, multi-agent conversational chatbot with multi-agent research capabilities driven by speech queries:

+ + +

Hackster Realtime Language-Segment-Anything on Jetson Orin (3-4-2024)

+

Huy Mai enables Segment Anything (SAM) with natural language prompting using GroundingDINO for object detection:

+ + +

GitHub Japanese NMT Translation for Stable Diffusion (2-23-2024)

+

Toshihiko Aoki has created a prompt generator for stable-diffusion-webui that translates Japanese queries into English using a fine-tuned GPT-2 NMT model before feeding them into Stable Diffusion. Check out the full guide on GitHub under to-aoki/ja-tiny-sd-webui, including the training dataset and LoRA building!

+ + +

GitHub JetBot Voice to Action Tools: Empowering Your ROS2 Robot with Voice Control (2-17-2024)

+

Jen Hung Ho created ROS2 nodes for ASR/TTS on Jetson Nano that can be used to control JetBot, including customizable voice commands and the execution of advanced actions. Check it out on GitHub under Jen-Hung-Ho/ros2_jetbot_tools and Jen-Hung-Ho/ros2_jetbot_voice and on the forums here.

+ + +

Hackster ClearWater: Underwater Image Enhancement with Generative AI (2-16-2024)

+

Vy Pham has created a novel denoising pipeline using a custom-trained Transformer-based diffusion model and a GAN upscaler for image enhancement, running on Jetson AGX Orin. It runs interactively in a Streamlit web UI for capturing photos and processing images and videos. Great work!

+ + +

Hackster AI-Powered Application for the Blind and Visually Impaired (12-13-2023)

+

Nurgaliyev Shakhizat demonstrates a locally-hosted Blind Assistant Device running on Jetson AGX Orin 64GB Developer Kit for realtime image-to-speech translation:

+

+
+

  Find more resources about this project here: [Hackster] [GitHub]

+
+

Dave's Armoury Bringing GLaDOS to life with Robotics and AI (2-8-2024)

+

See how DIY robotics legend Dave Niewinski from davesarmoury.com brings GLaDOS to life using Jetson AGX Orin, running LLMs onboard alongside object + depth tracking, and RIVA ASR/TTS with a custom-trained voice model for speech recognition and synthesis! The build uses a Unitree Z1 arm with 3D-printed parts and a StereoLabs ZED2 camera.

+ + +
+

  Find more resources about this project here: [Forums] [GitHub]

+
+

Hackster Seeed Studio's Local Voice Chatbot Puts a Speech-Recognizing LLaMa-2 LLM on Your Jetson (2-7-2024)

+

Seeed Studio has announced the launch of the Local Voice Chatbot, an NVIDIA Riva- and LLaMa-2-based large language model (LLM) chatbot with voice recognition capabilities, running entirely locally on NVIDIA Jetson devices, including the company's own reComputer range. Follow the step-by-step guide on the Seeed Studio wiki.

+ + +

YouTube GenAI Nerds React - Insider Look at NVIDIA's Newest Generative AI (2-6-2024)

+

Watch this panel about the latest trends & tech in edge AI, featuring Kerry Shih from OStream, Jim Benson from JetsonHacks, and Dusty from NVIDIA.

+ + +

NVIDIA Bringing Generative AI to Life with NVIDIA Jetson (11-7-2023)

+

Watch this webinar about deploying LLMs, VLMs, ViTs, and vector databases onboard Jetson Orin for building next-generation applications using Generative AI:

+ + +

JetsonHacks Jetson AI Labs – Generative AI Playground (10-31-2023)

+

JetsonHacks publishes an insightful video that walks developers through the typical steps for running generative AI models on Jetson following this site's tutorials. The video demonstrates interaction with the LLaVA model.

+ + +

Hackster Vision2Audio - Giving the blind an understanding through AI (10-15-2023)

+

Nurgaliyev Shakhizat demonstrates Vision2Audio running on Jetson AGX Orin 64GB Developer Kit to harness the power of LLaVA to help visually impaired people:

+

+

NVIDIA Generative AI Models at the Edge (10-19-2023)

+

Follow this walkthrough of the Jetson AI Lab tutorials along with coverage of the latest features and advances coming to JetPack 6 and beyond:

+ + +
+

  Technical Blog - https://developer.nvidia.com/blog/bringing-generative-ai-to-life-with-jetson/

+
+

Medium How to set up your Jetson device for LLM inference and fine-tuning (10-02-2023)

+

Michael Yuan's guide demonstrates how to set up the Jetson AGX Orin 64GB Developer Kit specifically for large language model (LLM) inference, highlighting the crucial role of GPUs and the cost-effectiveness of the Jetson AGX Orin for LLM tasks.

+
+

  https://medium.com/@michaelyuan_88928/how-to-set-up-your-jetson-device-for-llm-inference-and-fine-tuning-682e36444d43

+
+

Hackster Getting Started with AI on Nvidia Jetson AGX Orin Dev Kit (09-16-2023)

+

Nurgaliyev Shakhizat demonstrates llamaspeak on Jetson AGX Orin 64GB Developer Kit in this Hackster post:

+

+

Hackster New AI Tool Is Generating a Lot of Buzz (09-13-2023)

+

Nick Bild provides an insightful introduction to the Jetson Generative AI Playground:

+
+

  https://www.hackster.io/news/new-ai-tool-is-generating-a-lot-of-buzz-3cc5f23a3598

+
+

JetsonHacks Use These! Jetson Docker Containers Tutorial (09-04-2023)

+

JetsonHacks has an in-depth tutorial on how to use jetson-containers and even shows the text-generation-webui and stable-diffusion-webui containers in action!

+ + +

Hackster LLaMa 2 LLMs w/ NVIDIA Jetson and text-generation-webui (08-17-2023)

+

Paul DeCarlo demonstrates 13B and 70B parameter LLaMa 2 models running locally on Jetson AGX Orin 64GB Developer Kit in this Hackster post:

+

+

Hackster Running a ChatGPT-Like LLM-LLaMA2 on a Nvidia Jetson Cluster (08-14-2023)

+

Discover how to run a LLaMA-2 7B model on an NVIDIA Jetson cluster in this insightful tutorial by Nurgaliyev Shakhizat:

+

+

JetsonHacks Speech AI on NVIDIA Jetson Tutorial (08-07-2023)

+

JetsonHacks gives a nice introduction to the NVIDIA Riva SDK and demonstrates its automated speech recognition (ASR) capability on the Jetson Orin Nano Developer Kit.

+ + +

Hackster LLM based Multimodal AI w/ Azure Open AI & NVIDIA Jetson (07-12-2023)

+

Learn how to harness the power of multimodal AI by running Microsoft JARVIS on a Jetson AGX Orin 64GB Developer Kit, enabling a wide range of AI tasks with ChatGPT-like capabilities, image generation, and more, in this comprehensive guide by Paul DeCarlo.

+

+

Hackster How to Run a ChatGPT-Like LLM on NVIDIA Jetson board (06-13-2023)

+

Nurgaliyev Shakhizat explores a voice AI assistant on Jetson using FastChat and the Vosk API.

+

+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/css/colors.css b/css/colors.css new file mode 100644 index 00000000..08804d11 --- /dev/null +++ b/css/colors.css @@ -0,0 +1,327 @@ +[data-md-color-scheme="playful-purple"] { + + --md-accent-fg-color--rgb: 177, 43, 255; /* #b12bff */ + --md-accent-fg-color--hsl: 278, 100%, 58%; + --md-accent-fg-color: hsl(var(--md-accent-fg-color--hsl)); + + --md-accent-fg-color--dark-hsl: 278, 100%, 51%; + --md-accent-fg-color--dark: hsl(var(--md-accent-fg-color--dark-hsl)); + + --md-accent-fg-color--light-rgb: 214, 143, 255; /* D68FFF */ + --md-accent-fg-color--light-hsl: 278, 100%, 78%; + --md-accent-fg-color--light: hsla(var(--md-accent-fg-color--light-hsl), 1.0); + + --md-accent-bg-color--rgb: 255, 255, 255; /* #ffffff */ + --md-accent-bg-color: rgb(var(--md-accent-bg-color--rgb)); + + --md-primary-fg-color: hsl(210, 100%, 40%); + --md-primary-fg-color--light: hsl(210, 100%, 50%); + --md-primary-fg-color--dark: hsl(210, 100%, 20%); + + --md-mermaid-node-bg-color: #e3f5c5; + --md-mermaid-node-fg-color: #518000; + --md-default-fg-color--lightest: #f9fff2; +} + +[data-md-color-scheme="nv-black-green"] { + + --md-typeset-a-color: #000000; + + --md-accent-fg-color--rgb: #76b900; + --md-accent-fg-color--hsl: hsl(82, 100%, 36%); + --md-accent-fg-color: var(--md-accent-fg-color--hsl); + + --md-accent-fg-color--dark-hsl: hsl(82, 100%, 32%); + --md-accent-fg-color--dark: var(--md-accent-fg-color--dark-hsl); + + --md-accent-fg-color--light-rgb: #81BF15; + --md-accent-fg-color--light-hsl: hsla(82, 80%, 42%, 1.0); + --md-accent-fg-color--light: var(--md-accent-fg-color--light-hsl); + + --md-accent-bg-color--rgb: 255, 255, 255; /* #ffffff */ + --md-accent-bg-color: rgb(#000000); + + --md-primary-fg-color: hsl(82, 0%, 10%); + --md-primary-fg-color--light: hsl(82, 0%, 30%); + --md-primary-fg-color--dark: hsl(82, 0%, 0%); + + --md-mermaid-node-bg-color: #e3f5c5; + --md-mermaid-node-fg-color: #518000; + --md-default-fg-color--lightest: #f9fff2; +} + +a.nv-buy-link,.load-buy-link { + text-decoration: none; + background-color: #76b900; + color: #000; + border: none; + cursor: pointer; + line-height: 24px; + font-family: Arial,Helvetica,Sans-Serif!important; + transition: all .2s ease-out; + cursor: pointer; + display: inline-block; + font-weight: 700; + font-size: 1.2em; + padding: .625em 2em; +} + +a.nv-buy-link:hover,.load-buy-link:hover { + background-color: #91c733; + color: #000; +} + +.specs-container { + margin-top: 0.6rem; +} + +.specs-container li { + list-style: none; + margin-left: 0!important; + padding: 0!important; +} + +.specs-container ul { + padding: 0; + margin: 0 +} + +.specs { + font-weight: 400; + background: url(https://store.nvidia.com/jetson/store/UL-icon.d0c461d7202592cb.svg); + background-repeat: no-repeat; + background-position: 0 8px; + padding-left: 18px; + padding-bottom: 10px; + font-size: 16px +} + +[data-md-color-scheme="nvgreen"] { + + --md-primary-fg-color: #76B900; + --md-primary-fg-color--light: #C5E1A5; + --md-primary-fg-color--dark: #518000; + --md-accent-fg-color: #004D40; + + --md-mermaid-node-bg-color: #e3f5c5; + --md-mermaid-node-fg-color: #518000; + --md-default-fg-color--lightest: #f9fff2; +} + +[data-md-color-scheme="slate"] { + + --md-primary-fg-color: #76B900; + --md-primary-fg-color--light: #C5E1A5; + --md-primary-fg-color--dark: #518000; + --md-accent-fg-color: #a2ff00; + + --md-mermaid-node-bg-color: #518000; + --md-mermaid-node-fg-color: #e3f5c5; + --md-default-fg-color--lightest: #384e4c; + + 
--md-hue: 210; + } + +.numberCircle { + + font: Arial, sans-serif; + + width: 1.5em; + height: 1.5em; + box-sizing: initial; + + display: inline-block; + + background: #76B900; + color: #FFFFFF; + text-align: center; + border-radius: 100%; + + line-height: 1.5em; + box-sizing: content-box; +} + + +.roundSquare { + + font: Arial, sans-serif; + + height: 1.25em; + box-sizing: initial; + + display: inline-block; + + background: #004D40; + border: 0.1em solid #CCCCCC; + color: #FFFFFF; + text-align: center; + border-radius: 0.5em; + + line-height: 1.25em; + box-sizing: content-box; + + padding: 0.2em + +} + +.blobDarkGreen1 { + background: #0066467f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobDarkGreen2 { + background: #0085697f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobDarkGreen3 { + background: #00a48f7f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobDarkGreen4 { + background: #00c5b97f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobDarkGreen5 { + background: #00e6e67f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} + +.blobLightGreen1 { + background: #76b9007f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobLightGreen2 { + background: #7fc33a7f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobLightGreen3 { + background: #95d6767f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobLightGreen4 { + background: #b1e8a97f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobLightGreen5 { + background: #d5f8d87f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} + +.blobPink1 { + background: #ec84d07f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobPink2 { + background: #fe8fbe7f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobPink3 { + background: #ff9fb27f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em; +} + +.blobGitHub { + background: #394657; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobJetsonHacks { + background: #394657; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobDavesArmoury { + background: #f7b700; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobHackster { + background: #2e9fe6; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobMedium { + background: #292929; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobNvidia { + background: #76b900; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobYouTube { + background: #FF0000; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobGreen { + background: #00AA00; color: #fff; font-size: 0.8em; border-radius: 0.3em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobWhite { + background: #EEE; color: #000; border-color: #666; border: solid 0.1em; font-size: 0.8em; border-radius: 0.3em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobBlack { + background: #222; color: #FFF; font-size: 0.8em; border-radius: 0.3em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.highlightYellow { + background: #ffc105; + border-radius: 0.5em; + 
padding: 0.2em +} + +.highlightGreen { + background: #00c753; + border-radius: 0.5em; + padding: 0.2em +} + +.highlightGray { + background: #DDDDDD; + border-radius: 0.5em; + padding: 0.2em; + color: #333333; +} + +.center50 { + display: block; + margin-left: auto; + margin-right: auto; + width: 50%; +} + +.right { + clear: none; + float: right; + width: 50%; + padding: 0rem 0.5rem 0rem 0.5rem ; +} + +.left { + clear: none; + float: left; + width: 50%; + padding: 0rem 0.5rem 0rem 0.5rem ; +} + +.right40 { + clear: none; + float: right; + width: 40%; + padding: 0rem 0.5rem 0rem 1.5rem ; +} + +.left40 { + clear: none; + float: left; + width: 40%; + padding: 0rem 1.5rem 0rem 0.5rem ; +} + +.dmark { + position: relative; + display: inline-block; + border-bottom: 1px dotted black; +} + +.dmark .dmarkpopup { + visibility: hidden; + width: 400px; + background-color: #DDDDDD; + color: #fff; + text-align: center; + border-radius: 12px; + padding: 15px; + margin: 5px; + box-shadow: 5px 5px 15px #004D40; + position: absolute; + z-index: 1; +} + +.dmark:hover .dmarkpopup { + visibility: visible; +} + +.md-footer__inner { display: none } diff --git a/css/extra.css b/css/extra.css new file mode 100644 index 00000000..4b8cf89a --- /dev/null +++ b/css/extra.css @@ -0,0 +1,119 @@ +.md-header { + background: #FFFFFF +} + +.md-logo { + display: none!important; +} + +.md-icon { + color: black; +} + +.md-header__title { + color: #000000; + margin-left: 0.6rem!important; +} + +.md-tabs { + background: #000000 +} + +.md-nav__link--active { + font-weight: 900; +} + +/* Bigger width for md-content for main text */ +.md-grid { + max-width: 90%; /* or 100%, if you want to stretch to full-width */ + } + +.md-banner__inner { + font-size: .7rem; + margin: 0.2rem auto; + margin-top: 0.2rem; + margin-right: auto; + margin-bottom: 0.2rem; + margin-left: auto; + padding: 0 0.8rem; +} + +.md-typeset a { + text-decoration: underline; + text-decoration-color: var(--md-accent-fg-color); + text-decoration-thickness: 2px; + text-underline-offset: 0.3125em; +} + +.md-typeset a:hover { + text-decoration: underline; + text-decoration-color: var(--md-primary-fg-color); + text-decoration-thickness: 2px; + text-underline-offset: 0.3125em; +} + +.global-footer__links{ + clear: both; + list-style-type: none!important; + margin: 0 0 4px!important; + padding: 0; +} + +.global-footer__links>li{ + display: inline-block; + margin-left: 5px!important; + margin-right: 5px!important; + margin-bottom: 0px!important; + padding-right: 10px; + position: relative; +} + +.global-footer__links>li:last-child:after { + display: none +} + +.global-footer__links>li>a { + color: #666; + display: inline-block; + font-size: 1pc; + font-weight: 100; + line-height: 24px; + text-decoration: none; +} + +.global-footer__links>li:after{ + background: #666; + content: " "; + height: 14px; + position: absolute; + right: 0; + top: 5px; + width: 1px; +} + +.global-footer__copyright{ + clear: both; + color: #666; + font-size: 9pt; + line-height: 1pc; +} + +.md-button { + margin-top: .5rem!important; + margin-right: .5rem!important; + color: var(--md-primary-fg-color)!important; + border-color: var(--md-accent-fg-color)!important; + text-decoration: none!important; + line-height: 1.25!important; + font-weight: 700!important; +} + +.md-button--primary { + background-color: var(--md-accent-fg-color)!important; + border-width: 0!important; +} + +.md-button--primary:focus, +.md-button--primary:hover { + background-color: var(--md-accent-fg-color--light)!important; +} \ No 
newline at end of file diff --git a/css/nvidia-font.css b/css/nvidia-font.css new file mode 100644 index 00000000..94c8892d --- /dev/null +++ b/css/nvidia-font.css @@ -0,0 +1,63 @@ +/* + ============================================= + NVIDIA Sans fonts + Owner: Vadim Leontjev (vleontjev@nvidia.com) + ============================================= +*/ + + + + +/* Set up for old browsers*/ +@supports not (font-variation-settings: normal) { + @font-face { + font-family: "NVIDIA-NALA"; + src: url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Lt.woff") format("woff"), + url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Lt.woff2") format("woff2"); + font-weight: 300; + font-style: normal; + } + @font-face { + font-family: "NVIDIA-NALA"; + src: url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Rg.woff") format("woff"), + url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Rg.woff2") format("woff2"); + font-weight: 400; + font-style: normal; + } + @font-face { + font-family: "NVIDIA-NALA"; + src: url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Md.woff") format("woff"), + url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Md.woff2") format("woff2"); + font-weight: 500; + font-style: normal; + } + @font-face { + font-family: "NVIDIA-NALA"; + src: url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Bd.woff") format("woff"), + url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Bd.woff2") format("woff2"); + font-weight: 700; + font-style: normal; + } +} + +/* Set up for modern browsers, all weights */ +@supports (font-variation-settings: normal) { + @font-face { + font-family: 'NVIDIA-NALA'; + src: url('https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/var/NVIDIASansVF_NALA_W_Wght.woff2') format('woff2 supports variations'), + url('https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/var/NVIDIASansVF_NALA_W_Wght.woff2') format('woff2-variations'); + font-weight: 100 1000; + font-stretch: 25% 151%; + font-style: normal; + font-display: swap; + } + @font-face { + font-family: 'NVIDIA-NALA'; + src: url('https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/var/NVIDIASansVF_Wght_NALA_W_Italic.woff2') format('woff2 supports variations'), + url('https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/var/NVIDIASansVF_Wght_NALA_W_Italic.woff2') format('woff2-variations'); + font-weight: 100 1000; + font-stretch: 25% 151%; + font-style: italic; + font-display: swap; + } +} \ No newline at end of file diff --git a/getting-started.html b/getting-started.html new file mode 100644 index 00000000..0769aac7 --- /dev/null +++ b/getting-started.html @@ -0,0 +1,1645 @@ + + + + + + + + + + + + + + + + + + + + + + Getting started - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Getting started

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/hello_ai_world.html b/hello_ai_world.html new file mode 100644 index 00000000..c3aacb27 --- /dev/null +++ b/hello_ai_world.html @@ -0,0 +1,1670 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Hello AI World - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Hello AI World

+

Hello AI World is an in-depth tutorial series for DNN-based inference and training of image classification, object detection, semantic segmentation, and more. It is built on the jetson-inference library using TensorRT for optimized performance on Jetson.

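To give a flavor of the workflow, here's a minimal sketch of running one of the image-classification samples from the jetson-inference container (paths follow the project's README; the sample image is one of the stock test images):

```bash
# Clone the repo and launch the prebuilt container
git clone --recursive https://github.com/dusty-nv/jetson-inference
cd jetson-inference
docker/run.sh

# Inside the container: classify a test image with a TensorRT-optimized network
cd build/aarch64/bin
./imagenet.py images/orange_0.jpg images/test/output_0.jpg
```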
+

+

It's highly recommended to familiarize yourself with the concepts of machine learning and computer vision before diving into the more advanced topics of generative AI here on the Jetson AI Lab. Many of these models will prove useful during your development.

+

+

+

HELLO AI WORLD >> https://github.com/dusty-nv/jetson-inference

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/images/Chrome_ERR_CERT.png b/images/Chrome_ERR_CERT.png new file mode 100644 index 00000000..81d1841c Binary files /dev/null and b/images/Chrome_ERR_CERT.png differ diff --git a/images/Chrome_ERR_CERT_after_advanced.png b/images/Chrome_ERR_CERT_after_advanced.png new file mode 100644 index 00000000..9cc776a7 Binary files /dev/null and b/images/Chrome_ERR_CERT_after_advanced.png differ diff --git a/images/JON-with-Gcube.png b/images/JON-with-Gcube.png new file mode 100644 index 00000000..590f3b1f Binary files /dev/null and b/images/JON-with-Gcube.png differ diff --git a/images/JON_Gen-AI-panels.png b/images/JON_Gen-AI-panels.png new file mode 100644 index 00000000..498dd306 Binary files /dev/null and b/images/JON_Gen-AI-panels.png differ diff --git a/images/JON_Gen-AI-panels_1024px.jpg b/images/JON_Gen-AI-panels_1024px.jpg new file mode 100644 index 00000000..30458936 Binary files /dev/null and b/images/JON_Gen-AI-panels_1024px.jpg differ diff --git a/images/JON_Gen-AI-panels_1024px.png b/images/JON_Gen-AI-panels_1024px.png new file mode 100644 index 00000000..58251f72 Binary files /dev/null and b/images/JON_Gen-AI-panels_1024px.png differ diff --git a/images/NVIDIA-JetsonOrin-3QTR-Front-Left_800px.png b/images/NVIDIA-JetsonOrin-3QTR-Front-Left_800px.png new file mode 100644 index 00000000..ad9930cd Binary files /dev/null and b/images/NVIDIA-JetsonOrin-3QTR-Front-Left_800px.png differ diff --git a/images/TAM_15s_1080p.mp4 b/images/TAM_15s_1080p.mp4 new file mode 100644 index 00000000..671caa58 Binary files /dev/null and b/images/TAM_15s_1080p.mp4 differ diff --git a/images/TAM_screencast_cat_720p-80pcnt.mp4 b/images/TAM_screencast_cat_720p-80pcnt.mp4 new file mode 100644 index 00000000..bc67c630 Binary files /dev/null and b/images/TAM_screencast_cat_720p-80pcnt.mp4 differ diff --git a/images/TAM_screenshot.png b/images/TAM_screenshot.png new file mode 100644 index 00000000..2f378730 Binary files /dev/null and b/images/TAM_screenshot.png differ diff --git a/images/TAM_screenshot_cat.png b/images/TAM_screenshot_cat.png new file mode 100644 index 00000000..2c35f3f2 Binary files /dev/null and b/images/TAM_screenshot_cat.png differ diff --git a/images/audio-generation-2560x1440.jpg b/images/audio-generation-2560x1440.jpg new file mode 100644 index 00000000..be0afe85 Binary files /dev/null and b/images/audio-generation-2560x1440.jpg differ diff --git a/images/audiocraft_jupyterlab_demo.png b/images/audiocraft_jupyterlab_demo.png new file mode 100644 index 00000000..a585641b Binary files /dev/null and b/images/audiocraft_jupyterlab_demo.png differ diff --git a/images/balena_etcher.png b/images/balena_etcher.png new file mode 100644 index 00000000..be77fad8 Binary files /dev/null and b/images/balena_etcher.png differ diff --git a/images/cube_black.png b/images/cube_black.png new file mode 100644 index 00000000..5e5ed000 Binary files /dev/null and b/images/cube_black.png differ diff --git a/images/cube_green.png b/images/cube_green.png new file mode 100644 index 00000000..d2627f4d Binary files /dev/null and b/images/cube_green.png differ diff --git a/images/cube_white.png b/images/cube_white.png new file mode 100644 index 00000000..c8ed7148 Binary files /dev/null and b/images/cube_white.png differ diff --git a/images/distillation-2560x1440.jpg b/images/distillation-2560x1440.jpg new file mode 100644 index 00000000..3b62a41d Binary files /dev/null and b/images/distillation-2560x1440.jpg differ diff --git 
a/images/efficientvit_sam_demo.png b/images/efficientvit_sam_demo.png new file mode 100644 index 00000000..fbd0c71d Binary files /dev/null and b/images/efficientvit_sam_demo.png differ diff --git a/images/favicon.ico b/images/favicon.ico new file mode 100644 index 00000000..424df872 Binary files /dev/null and b/images/favicon.ico differ diff --git a/images/favicon_black.png b/images/favicon_black.png new file mode 100644 index 00000000..84d37802 Binary files /dev/null and b/images/favicon_black.png differ diff --git a/images/favicon_darkbluebg.png b/images/favicon_darkbluebg.png new file mode 100644 index 00000000..c11099a7 Binary files /dev/null and b/images/favicon_darkbluebg.png differ diff --git a/images/favicon_darkgreen.png b/images/favicon_darkgreen.png new file mode 100644 index 00000000..5bc77934 Binary files /dev/null and b/images/favicon_darkgreen.png differ diff --git a/images/favicon_darkgreenbg.png b/images/favicon_darkgreenbg.png new file mode 100644 index 00000000..beefc9e8 Binary files /dev/null and b/images/favicon_darkgreenbg.png differ diff --git a/images/favicon_navy.png b/images/favicon_navy.png new file mode 100644 index 00000000..fe0e2034 Binary files /dev/null and b/images/favicon_navy.png differ diff --git a/images/fw-update-progress_monitor.jpg b/images/fw-update-progress_monitor.jpg new file mode 100644 index 00000000..ad9453fc Binary files /dev/null and b/images/fw-update-progress_monitor.jpg differ diff --git a/images/fw_update_4.1-to-5.0.png b/images/fw_update_4.1-to-5.0.png new file mode 100644 index 00000000..02a35909 Binary files /dev/null and b/images/fw_update_4.1-to-5.0.png differ diff --git a/images/graph_llm-text-generation.png b/images/graph_llm-text-generation.png new file mode 100644 index 00000000..41153cc5 Binary files /dev/null and b/images/graph_llm-text-generation.png differ diff --git a/images/graph_llm-text-generation.svg b/images/graph_llm-text-generation.svg new file mode 100644 index 00000000..21196b45 --- /dev/null +++ b/images/graph_llm-text-generation.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/images/graph_vit-vision-transformers.svg b/images/graph_vit-vision-transformers.svg new file mode 100644 index 00000000..0daa5681 --- /dev/null +++ b/images/graph_vit-vision-transformers.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/images/graph_vlm-text-generation.svg b/images/graph_vlm-text-generation.svg new file mode 100644 index 00000000..86a86ffd --- /dev/null +++ b/images/graph_vlm-text-generation.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/images/icon_NanoDB_512px.png b/images/icon_NanoDB_512px.png new file mode 100644 index 00000000..505b6d09 Binary files /dev/null and b/images/icon_NanoDB_512px.png differ diff --git a/images/icon_NanoSAM.png b/images/icon_NanoSAM.png new file mode 100644 index 00000000..e9240b8d Binary files /dev/null and b/images/icon_NanoSAM.png differ diff --git a/images/icon_NanoSAM_512px.jpg b/images/icon_NanoSAM_512px.jpg new file mode 100644 index 00000000..19ab6d48 Binary files /dev/null and b/images/icon_NanoSAM_512px.jpg differ diff --git a/images/icon_NanoSAM_512px.png b/images/icon_NanoSAM_512px.png new file mode 100644 index 00000000..335f97fb Binary files /dev/null and b/images/icon_NanoSAM_512px.png differ diff --git a/images/icon_diffusion.png b/images/icon_diffusion.png new file mode 100644 index 00000000..f9cb33e6 Binary files /dev/null and b/images/icon_diffusion.png differ diff --git a/images/icon_diffusion_512px.jpg b/images/icon_diffusion_512px.jpg 
new file mode 100644 index 00000000..07b1d1df Binary files /dev/null and b/images/icon_diffusion_512px.jpg differ diff --git a/images/icon_diffusion_512px.png b/images/icon_diffusion_512px.png new file mode 100644 index 00000000..2c3dc683 Binary files /dev/null and b/images/icon_diffusion_512px.png differ diff --git a/images/icon_distillation.png b/images/icon_distillation.png new file mode 100644 index 00000000..6051b62e Binary files /dev/null and b/images/icon_distillation.png differ diff --git a/images/icon_text-generation_512px.jpg b/images/icon_text-generation_512px.jpg new file mode 100644 index 00000000..934b3c9d Binary files /dev/null and b/images/icon_text-generation_512px.jpg differ diff --git a/images/icon_text-generation_512px.png b/images/icon_text-generation_512px.png new file mode 100644 index 00000000..1435ac81 Binary files /dev/null and b/images/icon_text-generation_512px.png differ diff --git a/images/icon_text-generation_iso.png b/images/icon_text-generation_iso.png new file mode 100644 index 00000000..2458ec87 Binary files /dev/null and b/images/icon_text-generation_iso.png differ diff --git a/images/icon_vision-language-model.png b/images/icon_vision-language-model.png new file mode 100644 index 00000000..23a60e2d Binary files /dev/null and b/images/icon_vision-language-model.png differ diff --git a/images/icon_vision-language-model.png:Zone.Identifier b/images/icon_vision-language-model.png:Zone.Identifier new file mode 100644 index 00000000..bbbaf191 --- /dev/null +++ b/images/icon_vision-language-model.png:Zone.Identifier @@ -0,0 +1,3 @@ +[ZoneTransfer] +ZoneId=3 +HostUrl=https://www.figma.com/ diff --git a/images/icon_vision-language-model_512px.jpg b/images/icon_vision-language-model_512px.jpg new file mode 100644 index 00000000..7eb80d37 Binary files /dev/null and b/images/icon_vision-language-model_512px.jpg differ diff --git a/images/icon_vision-language-model_512px.png b/images/icon_vision-language-model_512px.png new file mode 100644 index 00000000..683e0eb6 Binary files /dev/null and b/images/icon_vision-language-model_512px.png differ diff --git a/images/iconnv_audio-generation_512x512.jpg b/images/iconnv_audio-generation_512x512.jpg new file mode 100644 index 00000000..dc6446b2 Binary files /dev/null and b/images/iconnv_audio-generation_512x512.jpg differ diff --git a/images/iconnv_distillation_512x512.jpg b/images/iconnv_distillation_512x512.jpg new file mode 100644 index 00000000..2ffdcd9a Binary files /dev/null and b/images/iconnv_distillation_512x512.jpg differ diff --git a/images/iconnv_image-generation_512x512.jpg b/images/iconnv_image-generation_512x512.jpg new file mode 100644 index 00000000..c4659307 Binary files /dev/null and b/images/iconnv_image-generation_512x512.jpg differ diff --git a/images/iconnv_llamaspeak_512x512.jpg b/images/iconnv_llamaspeak_512x512.jpg new file mode 100644 index 00000000..81a4fe0d Binary files /dev/null and b/images/iconnv_llamaspeak_512x512.jpg differ diff --git a/images/iconnv_nanodb_512x512.jpg b/images/iconnv_nanodb_512x512.jpg new file mode 100644 index 00000000..0e8d1340 Binary files /dev/null and b/images/iconnv_nanodb_512x512.jpg differ diff --git a/images/iconnv_nanosam_512x512.jpg b/images/iconnv_nanosam_512x512.jpg new file mode 100644 index 00000000..2a4e2456 Binary files /dev/null and b/images/iconnv_nanosam_512x512.jpg differ diff --git a/images/iconnv_text-generation_512x512.jpg b/images/iconnv_text-generation_512x512.jpg new file mode 100644 index 00000000..2feebd5c Binary files /dev/null and 
b/images/iconnv_text-generation_512x512.jpg differ diff --git a/images/iconnv_text-vision_512x512.jpg b/images/iconnv_text-vision_512x512.jpg new file mode 100644 index 00000000..dc04a1f6 Binary files /dev/null and b/images/iconnv_text-vision_512x512.jpg differ diff --git a/images/image-generation-2560x1440.jpg b/images/image-generation-2560x1440.jpg new file mode 100644 index 00000000..2b14d92b Binary files /dev/null and b/images/image-generation-2560x1440.jpg differ diff --git a/images/japanese-stable-diffusion.jpg b/images/japanese-stable-diffusion.jpg new file mode 100644 index 00000000..66fd4fdf Binary files /dev/null and b/images/japanese-stable-diffusion.jpg differ diff --git a/images/jetson-agx-orin-dev-kit-3qtr-front-right-reverse_800px.png b/images/jetson-agx-orin-dev-kit-3qtr-front-right-reverse_800px.png new file mode 100644 index 00000000..f926cf59 Binary files /dev/null and b/images/jetson-agx-orin-dev-kit-3qtr-front-right-reverse_800px.png differ diff --git a/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png b/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png new file mode 100644 index 00000000..a442c0a1 Binary files /dev/null and b/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png differ diff --git a/images/jetson-orin-nano-dev-kit-sd-slot.png b/images/jetson-orin-nano-dev-kit-sd-slot.png new file mode 100644 index 00000000..dc6c134a Binary files /dev/null and b/images/jetson-orin-nano-dev-kit-sd-slot.png differ diff --git a/images/jon-iso_1200x900.png b/images/jon-iso_1200x900.png new file mode 100644 index 00000000..ca6ad442 Binary files /dev/null and b/images/jon-iso_1200x900.png differ diff --git a/images/llamaspeak-2560x1440.jpg b/images/llamaspeak-2560x1440.jpg new file mode 100644 index 00000000..dc921b5b Binary files /dev/null and b/images/llamaspeak-2560x1440.jpg differ diff --git a/images/llamaspeak_block_diagram.jpg b/images/llamaspeak_block_diagram.jpg new file mode 100644 index 00000000..4657102b Binary files /dev/null and b/images/llamaspeak_block_diagram.jpg differ diff --git a/images/m48-document-support-guide-256px-blk.png b/images/m48-document-support-guide-256px-blk.png new file mode 100644 index 00000000..e00c504f Binary files /dev/null and b/images/m48-document-support-guide-256px-blk.png differ diff --git a/images/m48-document-support-guide-256px-grn.png b/images/m48-document-support-guide-256px-grn.png new file mode 100644 index 00000000..b3ffa21e Binary files /dev/null and b/images/m48-document-support-guide-256px-grn.png differ diff --git a/images/m48-document-support-guide-256px-wht.png b/images/m48-document-support-guide-256px-wht.png new file mode 100644 index 00000000..2f158112 Binary files /dev/null and b/images/m48-document-support-guide-256px-wht.png differ diff --git a/images/m48-jetson-nano-256px-blk.png b/images/m48-jetson-nano-256px-blk.png new file mode 100644 index 00000000..8fa652de Binary files /dev/null and b/images/m48-jetson-nano-256px-blk.png differ diff --git a/images/m48-jetson-nano-256px-grn.png b/images/m48-jetson-nano-256px-grn.png new file mode 100644 index 00000000..5fb0097e Binary files /dev/null and b/images/m48-jetson-nano-256px-grn.png differ diff --git a/images/m48-jetson-nano-256px-wht.png b/images/m48-jetson-nano-256px-wht.png new file mode 100644 index 00000000..0d07589c Binary files /dev/null and b/images/m48-jetson-nano-256px-wht.png differ diff --git a/images/microsd_64gb.png b/images/microsd_64gb.png new file mode 100644 index 
00000000..4ce53d5c Binary files /dev/null and b/images/microsd_64gb.png differ diff --git a/images/minigpt4_gleaners.gif b/images/minigpt4_gleaners.gif new file mode 100644 index 00000000..484d2994 Binary files /dev/null and b/images/minigpt4_gleaners.gif differ diff --git a/images/mmj_android.jpg b/images/mmj_android.jpg new file mode 100644 index 00000000..92c69a0a Binary files /dev/null and b/images/mmj_android.jpg differ diff --git a/images/mmj_app.gif b/images/mmj_app.gif new file mode 100644 index 00000000..71f6f5db Binary files /dev/null and b/images/mmj_app.gif differ diff --git a/images/mmj_livestream.gif b/images/mmj_livestream.gif new file mode 100644 index 00000000..db02d3d5 Binary files /dev/null and b/images/mmj_livestream.gif differ diff --git a/images/mmj_streamer.png b/images/mmj_streamer.png new file mode 100644 index 00000000..576166af Binary files /dev/null and b/images/mmj_streamer.png differ diff --git a/images/mmj_tutorial.gif b/images/mmj_tutorial.gif new file mode 100644 index 00000000..d1e188d5 Binary files /dev/null and b/images/mmj_tutorial.gif differ diff --git a/images/mmj_vlc.gif b/images/mmj_vlc.gif new file mode 100644 index 00000000..d58662a0 Binary files /dev/null and b/images/mmj_vlc.gif differ diff --git a/images/mmj_vst.png b/images/mmj_vst.png new file mode 100644 index 00000000..b0645437 Binary files /dev/null and b/images/mmj_vst.png differ diff --git a/images/multimodal_agent.jpg b/images/multimodal_agent.jpg new file mode 100644 index 00000000..be1684fc Binary files /dev/null and b/images/multimodal_agent.jpg differ diff --git a/images/nano_llm_docs.jpg b/images/nano_llm_docs.jpg new file mode 100644 index 00000000..96a6eb0d Binary files /dev/null and b/images/nano_llm_docs.jpg differ diff --git a/images/nano_llm_docs_chat.jpg b/images/nano_llm_docs_chat.jpg new file mode 100644 index 00000000..634e3618 Binary files /dev/null and b/images/nano_llm_docs_chat.jpg differ diff --git a/images/nanodb-2560x1440.jpg b/images/nanodb-2560x1440.jpg new file mode 100644 index 00000000..a03e9e40 Binary files /dev/null and b/images/nanodb-2560x1440.jpg differ diff --git a/images/nanoowl_chrome_window.png b/images/nanoowl_chrome_window.png new file mode 100644 index 00000000..5000052e Binary files /dev/null and b/images/nanoowl_chrome_window.png differ diff --git a/images/nanosam-2560x1440.jpg b/images/nanosam-2560x1440.jpg new file mode 100644 index 00000000..355ffd29 Binary files /dev/null and b/images/nanosam-2560x1440.jpg differ diff --git a/images/nvicon_llamaspeak_1200x1200.jpg b/images/nvicon_llamaspeak_1200x1200.jpg new file mode 100644 index 00000000..81a4fe0d Binary files /dev/null and b/images/nvicon_llamaspeak_1200x1200.jpg differ diff --git a/images/nvidia-favicon-rgb-16x16px@2x.png b/images/nvidia-favicon-rgb-16x16px@2x.png new file mode 100644 index 00000000..e81621c7 Binary files /dev/null and b/images/nvidia-favicon-rgb-16x16px@2x.png differ diff --git a/images/nvidia-l4t-bootloader-post-install-notification.png b/images/nvidia-l4t-bootloader-post-install-notification.png new file mode 100644 index 00000000..dc11db65 Binary files /dev/null and b/images/nvidia-l4t-bootloader-post-install-notification.png differ diff --git a/images/nvidia_logo_white.png b/images/nvidia_logo_white.png new file mode 100644 index 00000000..777219df Binary files /dev/null and b/images/nvidia_logo_white.png differ diff --git a/images/nvidia_logo_white_60.png b/images/nvidia_logo_white_60.png new file mode 100644 index 00000000..f0f9ac86 Binary files /dev/null and 
b/images/nvidia_logo_white_60.png differ diff --git a/images/nvidia_logo_white_65.png b/images/nvidia_logo_white_65.png new file mode 100644 index 00000000..9055574b Binary files /dev/null and b/images/nvidia_logo_white_65.png differ diff --git a/images/nvidia_logo_white_70.png b/images/nvidia_logo_white_70.png new file mode 100644 index 00000000..1e73a68c Binary files /dev/null and b/images/nvidia_logo_white_70.png differ diff --git a/images/sam_notebook.png b/images/sam_notebook.png new file mode 100644 index 00000000..afe3a4d5 Binary files /dev/null and b/images/sam_notebook.png differ diff --git a/images/slm_console.gif b/images/slm_console.gif new file mode 100644 index 00000000..db3bb2bb Binary files /dev/null and b/images/slm_console.gif differ diff --git a/images/slm_console_2.gif b/images/slm_console_2.gif new file mode 100644 index 00000000..655023d4 Binary files /dev/null and b/images/slm_console_2.gif differ diff --git a/images/ssd_nvme_1tb.png b/images/ssd_nvme_1tb.png new file mode 100644 index 00000000..5b417ee4 Binary files /dev/null and b/images/ssd_nvme_1tb.png differ diff --git a/images/stable-diffusion-webui_green-web.gif b/images/stable-diffusion-webui_green-web.gif new file mode 100644 index 00000000..8db8b840 Binary files /dev/null and b/images/stable-diffusion-webui_green-web.gif differ diff --git a/images/stable-diffusion-xl-central-park.jpg b/images/stable-diffusion-xl-central-park.jpg new file mode 100644 index 00000000..2028dcec Binary files /dev/null and b/images/stable-diffusion-xl-central-park.jpg differ diff --git a/images/stable-diffusion-xl-kids-workshop.jpg b/images/stable-diffusion-xl-kids-workshop.jpg new file mode 100644 index 00000000..dc14a01c Binary files /dev/null and b/images/stable-diffusion-xl-kids-workshop.jpg differ diff --git a/images/stable-diffusion-xl-model-select.jpg b/images/stable-diffusion-xl-model-select.jpg new file mode 100644 index 00000000..301e5493 Binary files /dev/null and b/images/stable-diffusion-xl-model-select.jpg differ diff --git a/images/stable-diffusion-xl-refiner-settings.jpg b/images/stable-diffusion-xl-refiner-settings.jpg new file mode 100644 index 00000000..74958b76 Binary files /dev/null and b/images/stable-diffusion-xl-refiner-settings.jpg differ diff --git a/images/stable-diffusion-xl-robot-generation.jpg b/images/stable-diffusion-xl-robot-generation.jpg new file mode 100644 index 00000000..ea8abefd Binary files /dev/null and b/images/stable-diffusion-xl-robot-generation.jpg differ diff --git a/images/stable-diffusion-xl-robot-mountain.jpg b/images/stable-diffusion-xl-robot-mountain.jpg new file mode 100644 index 00000000..0045138b Binary files /dev/null and b/images/stable-diffusion-xl-robot-mountain.jpg differ diff --git a/images/stable-diffusion-xl-robot-terminal.jpg b/images/stable-diffusion-xl-robot-terminal.jpg new file mode 100644 index 00000000..719abcac Binary files /dev/null and b/images/stable-diffusion-xl-robot-terminal.jpg differ diff --git a/images/stable-diffusion-xl-tokyo-gardens.jpg b/images/stable-diffusion-xl-tokyo-gardens.jpg new file mode 100644 index 00000000..a21078cf Binary files /dev/null and b/images/stable-diffusion-xl-tokyo-gardens.jpg differ diff --git a/images/stable-diffusion_space-ferret.png b/images/stable-diffusion_space-ferret.png new file mode 100644 index 00000000..48964e95 Binary files /dev/null and b/images/stable-diffusion_space-ferret.png differ diff --git a/images/text-generation-2560x1440.jpg b/images/text-generation-2560x1440.jpg new file mode 100644 index 
00000000..e97a466f Binary files /dev/null and b/images/text-generation-2560x1440.jpg differ diff --git a/images/text-generation-webui_sf-trip.gif b/images/text-generation-webui_sf-trip.gif new file mode 100644 index 00000000..f5c4ed71 Binary files /dev/null and b/images/text-generation-webui_sf-trip.gif differ diff --git a/images/text-vision-2560x1440.jpg b/images/text-vision-2560x1440.jpg new file mode 100644 index 00000000..45293d14 Binary files /dev/null and b/images/text-vision-2560x1440.jpg differ diff --git a/images/tgwui_Download-model.png b/images/tgwui_Download-model.png new file mode 100644 index 00000000..2f0245a0 Binary files /dev/null and b/images/tgwui_Download-model.png differ diff --git a/images/tgwui_llava_drag-n-drop_birds.gif b/images/tgwui_llava_drag-n-drop_birds.gif new file mode 100644 index 00000000..a1030d12 Binary files /dev/null and b/images/tgwui_llava_drag-n-drop_birds.gif differ diff --git a/images/tgwui_model-download-animation.gif b/images/tgwui_model-download-animation.gif new file mode 100644 index 00000000..4e69e8e6 Binary files /dev/null and b/images/tgwui_model-download-animation.gif differ diff --git a/images/tgwui_multimodal_llava_fish.jpg b/images/tgwui_multimodal_llava_fish.jpg new file mode 100644 index 00000000..1bc00b0e Binary files /dev/null and b/images/tgwui_multimodal_llava_fish.jpg differ diff --git a/images/tgwui_multimodal_llava_spacewalk.png b/images/tgwui_multimodal_llava_spacewalk.png new file mode 100644 index 00000000..7db6a4a0 Binary files /dev/null and b/images/tgwui_multimodal_llava_spacewalk.png differ diff --git a/images/video_vila_wildfire.gif b/images/video_vila_wildfire.gif new file mode 100644 index 00000000..1b489030 Binary files /dev/null and b/images/video_vila_wildfire.gif differ diff --git a/images/voicecraft_load_models.png b/images/voicecraft_load_models.png new file mode 100644 index 00000000..56eaedd8 Binary files /dev/null and b/images/voicecraft_load_models.png differ diff --git a/images/whisper_ipywebrtc_widget.png b/images/whisper_ipywebrtc_widget.png new file mode 100644 index 00000000..8c28359b Binary files /dev/null and b/images/whisper_ipywebrtc_widget.png differ diff --git a/images/whisper_jupyterlab_notebooks.png b/images/whisper_jupyterlab_notebooks.png new file mode 100644 index 00000000..e6eca2e4 Binary files /dev/null and b/images/whisper_jupyterlab_notebooks.png differ diff --git a/images/whisper_microphone_access.png b/images/whisper_microphone_access.png new file mode 100644 index 00000000..70c87fe8 Binary files /dev/null and b/images/whisper_microphone_access.png differ diff --git a/images/whisper_transcribe_result.png b/images/whisper_transcribe_result.png new file mode 100644 index 00000000..93bf87d2 Binary files /dev/null and b/images/whisper_transcribe_result.png differ diff --git a/images/whisper_web_setting.png b/images/whisper_web_setting.png new file mode 100644 index 00000000..5736db66 Binary files /dev/null and b/images/whisper_web_setting.png differ diff --git a/index.html b/index.html new file mode 100644 index 00000000..41142661 --- /dev/null +++ b/index.html @@ -0,0 +1,2270 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Home - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + + + + + +
+
+
+
+

Generative AI at the Edge

+

Bring generative AI to the world with NVIDIA® Jetson™

+ + Explore Tutorials + + +   Videos + + +
+
+ +
+
+
+
+ + +
+ +
+ + + + +
+
+
+ + +
+ + + +
+
+ + + +
+
+ + + +
+ Learn More +
+
+
+ + + + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/initial_setup_jon.html b/initial_setup_jon.html new file mode 100644 index 00000000..ff33c719 --- /dev/null +++ b/initial_setup_jon.html @@ -0,0 +1,2313 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + 🚅 Initial Setup Guide - Jetson Orin Nano 🆕 - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Initial Setup Guide for Jetson Orin Nano Developer Kit

+

+
+

Note

+

This guide supplements the official Jetson Orin Nano Developer Kit Getting Started Guide.

+
+

The NVIDIA® Jetson Orin Nano™ Developer Kit is the perfect kit for starting your journey into local generative AI evaluation and development.

+

This guide explains the complete flow, from opening the box, updating the firmware if needed, and flashing the latest JetPack 6.0 GA image onto an SD card, through the initial software setup, so that you will be ready for the tutorials listed on this site and other AI projects.

+

Check your inventory

+

The following items are needed or highly desirable to set up your Jetson Orin Nano Developer Kit.
+If you don't have them in your inventory, arrange for them and come back to this guide once they are available.

+
+

What does not come in the box - What you need/want to prepare

+

Storage

+
    +
  • microSD card (64GB or bigger)
  • +
  • NVMe SSD (Optional, but highly recommended for following tutorials on this site)
  • +
+

+

Means to access the terminal

+

You need one of the following sets:

+
    +
  • DisplayPort cable, DisplayPort-capable monitor, and a USB keyboard
  • +
  • DisplayPort-to-HDMI cable, HDMI-capable monitor (or TV), and a USB keyboard
  • +
  • USB to TTL Serial cable (Advanced)
  • +
+
+

Open the box

+
+

What you find in the box

+

+

Jetson Orin Nano Developer Kit

+

The Jetson Orin Nano Developer Kit consists of the Jetson Orin Nano module (enlarged SO-DIMM form factor) and the reference carrier board.

+

It is designed to use a microSD card as the primary storage, so the module (the one with the big black heat sink and fan) has a microSD card slot on its underside.

+

19V DC power supply

+
+

Overall flow

+
+

Jetson Orin Nano Initial Setup Flowchart (Click to expand)

+
flowchart
+    A(start) --> B{1. Check<br>Manufactured after May 2024?<br>i.e. Jetson UEFI Firmware<br>newer than version 36.0}
+    B --[YES] --> O[6. Flash JetPack 6.0 GA image on microSD card]
+    B --[NO] --> C[2. Flash JetPack 5.1.3 image on microSD card]
+    C --> D[3. Reboot] --> E{{Firmware update during reboot}}
+    E --> F[4. Run QSPI updater] --> G[5. Reboot] --> H{{Firmware update during reboot}}
+    H --> O
+    O --> P(7. Start developing on JetPack 6.0) 
+
+    style C fill:#fee
+    style D fill:#DEE,stroke:#333
+    style G fill:#DEE,stroke:#333
+    style F stroke-width:4px
+    style E stroke-width:2px,stroke-dasharray: 5 5
+    style H stroke-width:2px,stroke-dasharray: 5 5
+    style O fill:#fee
+
+ + +

1. Check if Jetson UEFI Firmware version > 36.3

+

Your Jetson Orin Nano Developer Kit may have the latest firmware ("Jetson UEFI firmware" on QSPI-NOR flash memory) flashed at the factory.

+

If not, we need to go through a set of procedures to upgrade to the latest firmware. (Luckily, we can now do this all just on Jetson, meaning we don't need to use a host Ubuntu PC any more!)

+

So let's first check the version of your Jetson UEFI Firmware.
+You can use either of the following methods.

+
+
+
+
    +
  1. Connect your monitor and USB keyboard to your developer kit.
  2. +
  3. Turn on the developer kit by plugging in the bundled DC power supply
  4. +
  5. Repeatedly press the Esc key on the keyboard, especially after the NVIDIA logo boot splash screen first appears on the monitor
  6. +
  7. You should see the UEFI setup menu screen
  8. +
  9. Check the third line from the top (below "Not specified"), which should be the version number of the Jetson UEFI firmware
  10. +
+
+
+
    +
  1. Connect a USB-to-TTL serial cable to the following pins of the J14 "button" header on the carrier board, located under the Jetson module. +
  2. +
  3. On your PC, run your console monitor program and open the USB serial port (see the example commands after this list).
  4. +
  5. Power on the developer kit by plugging in the bundled DC power supply
  6. +
  7. On the PC console, repeatedly press the Esc key, especially after the boot log first appears on the console
  8. +
  9. You should see the UEFI setup menu screen
  10. +
  11. Check the third line from the top (below "Not specified"), which should be the version number of the Jetson UEFI firmware
  12. +
+
+
+
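+
For reference, here is one way to open the serial console from a Linux PC. This is a minimal sketch and not part of the official guide: it assumes the USB-to-TTL cable enumerates as /dev/ttyUSB0 and that the screen utility is installed; check dmesg for the actual device name on your system.
+
    # Find the device node the USB-to-TTL cable enumerated as (often /dev/ttyUSB0)
    dmesg | grep -i ttyUSB
    # Open the serial console at 115200 baud (8 data bits, no parity, 1 stop bit)
    sudo screen /dev/ttyUSB0 115200
+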
+

You could skip to 6. Flash JetPack 6.0 GA image onto your microSD card, and try your luck to see if your Jetson Orin Nano Developer Kit just boots up to the initial software setup (oem-config).

+
+
+
+
+

[< 36.0] Upgrade the Jetson UEFI firmware to 36.x

+
+

Attention

+

Select the appropriate tab below based on the firmware version you found in the step above.

+

If you found that your Jetson Orin Nano needs its firmware updated to run JetPack 6, click the "Firmware < 36.0" tab, and additional steps 2 to 5 will appear for you to follow.

+

If you know your Jetson Orin Nano has the latest firmware, stay on the "Firmware 36.x" tab and skip to the next section (6. Flash JetPack 6.0 GA image onto your microSD card).

+
+
+
+
+
+

Your Jetson Orin Nano has the latest firmware and is ready for the JetPack 6 SD card.

+

Skip to the next section (6. Flash JetPack 6.0 GA image onto your microSD card)

+
+
+
+
+

Your Jetson Orin Nano needs its firmware updated in order for the JetPack 6 SD card to work.

+

Perform the following steps (2 to 5).

+
+

2. Flash JetPack 5.1.3 image onto your microSD card

+

First we need to run JetPack 5.1.3 so that its nvidia-l4t-bootloader package can activate its bootloader/firmware updater, which makes the firmware update run automatically on the next reboot.

+
    +
  1. +

    Download SD card image on to your PC

    +

    On your PC, download the JetPack 5.1.3 image for the Jetson Orin Nano Developer Kit from the official JetPack 5.1.3 page or from the direct link button below.

    +

    Jetson Orin Nano Developer Kit
    JetPack 5.1.3 image

    +
  2. +
  3. +

    Use Balena Etcher to flash image to SD card

    +

    If you don't have Balena Etcher on your PC, download it from the official Balena site. (A command-line alternative is sketched after this list.)

    +


    +
  4. +
  5. +

    Insert the flashed microSD card into the slot on the Jetson module

    +

    +
  6. +
+
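+
If you prefer flashing from a Linux command line instead of Balena Etcher, a minimal sketch follows; the same approach applies to the JetPack 6 image in step 6. The zip file name below is hypothetical (use the file you actually downloaded), and /dev/sdX must be replaced with your SD card's real device node, since dd overwrites the target without asking.
+
    # Identify the SD card's device node first (e.g. /dev/sdb) - everything on it will be erased
    lsblk
    # The SD card image downloads as a .zip; stream-extract it and write it to the card in one pass
    unzip -p jp513-orin-nano-sd-card-image.zip | sudo dd of=/dev/sdX bs=1M status=progress conv=fsync
+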

3. Power-on and reboot to ensure firmware gets updated to 5.0 (JetPack 5.1.3)

+
    +
  1. +

    Power-on

    +

    Turn on the Jetson Orin Nano Developer Kit with the JetPack 5.1.3 SD card inserted by plugging in the DC power supply.

    +
  2. +
  3. +

    Complete the initial software setup (oem-config)

    +
  4. +
  5. +

    Ensure firmware update is scheduled.

    +

    Once the Jetson boots into the Jetson Linux system, a background service automatically runs to schedule a firmware update (if needed) to be performed during the next boot-up process.

    +

    Once you see the following output, or once about 5 minutes have passed after powering on (to ensure the scheduling is done), reboot.

    +
    +
    +
    +

    +
    +
    +
    $ sudo systemctl status nv-l4t-bootloader-config
    +[sudo] password for jetson: 
    +● nv-l4t-bootloader-config.service - Configure bootloader service
    +    Loaded: loaded (/etc/systemd/system/nv-l4t-bootloader-config.service; enabled; vendor preset: enabled)
    +    Active: inactive (dead) since Fri 2024-05-03 13:36:13 PDT; 1min 57s ago
    +    Process: 11439 ExecStart=/opt/nvidia/l4t-bootloader-config/nv-l4t-bootloader-config.sh -v (code=exited, status=0/SUCCESS)
    +Main PID: 11439 (code=exited, status=0/SUCCESS)
    +
    +
    +
    +
    +
  6. +
  7. +

    Reboot

    +

    Reboot your Jetson Orin Nano Developer Kit.

    +

    You should see the following during the boot up process.

    +
    +
    +
    +

    +
    +
    +

    +
    +
    +
    +

    Once done, you will boot into JetPack 5.1.3 (again), with underlying firmware updated to 5.0-35550185.

    +
  8. +
+

4. Install and run QSPI Updater package

+
    +
  1. +

    Double-check that your firmware version is up to date (35.5.0 = JetPack 5.1.3)

    +

    Once it reboots back into the Jetson Linux system, run the following in a Jetson terminal:

    +
    sudo nvbootctrl dump-slots-info
    +
    +

    You should see something like the following, with the Current version indicating 35.5.0.

    +
    Current version: 35.5.0
    +Capsule update status: 0
    +Current bootloader slot: A
    +Active bootloader slot: A
    +num_slots: 2
    +slot: 0,             status: normal
    +slot: 1,             status: normal
    +
    +
  2. +
  3. +

    Install QSPI Updater Debian package to trigger another (final) firmware update

    +

    In a Jetson terminal, run the following:

    +
    sudo apt-get install nvidia-l4t-jetson-orin-nano-qspi-updater
    +
    +

    Installing the nvidia-l4t-jetson-orin-nano-qspi-updater package automatically runs its script to schedule another (final) firmware update during the next boot process, so that the firmware is ready for JetPack 6. (A quick way to verify the install is shown after this list.)

    +
  4. +
+
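+
Before rebooting, you can optionally confirm that the updater package really is installed; this is ordinary dpkg usage, not a step from the official flow:
+
    # Verify the QSPI updater package is installed (look for "Status: install ok installed")
    dpkg -s nvidia-l4t-jetson-orin-nano-qspi-updater
+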

5. Reboot and power-off the developer kit

+
    +
  1. +

    Reboot

    +

    Once the QSPI update is scheduled, reboot your Jetson Orin Nano Developer Kit.

    +
  2. +
  3. +

    Observe update

    +

    You can observe the update during the boot up process.

    +
  4. +
  5. +

    Power off

    +

    Once the update is done, the kit reboots and tries to boot again; however, it will get stuck UNLESS you swap the SD card for the JetPack 6 one.

    +

    Therefore you should just power off the developer kit by disconnecting the DC power supply.

    +
    +

    Attention

    +

    This part may look very confusing, as neither the attached monitor nor the debug UART shows any explicit message about what to do next.

    +

    What is going on here is that the Jetson's firmware (inside the QSPI-NOR flash memory) is now updated and ready for the JetPack 6 SD card; however, it is now incompatible with the JetPack 5.1.3 SD card left in the Jetson module's slot, so after the reboot it gets stuck in the boot process.

    +

    So there is nothing actually wrong with this boot halt (or endless rebooting). We just need to power off and insert a new SD card.

    +
    +
  6. +
+
+
+
+

6. Flash JetPack 6.0 GA image onto your microSD card

+

Once we know the onboard firmware is up-to-date and ready for JetPack 6, we can boot the Jetson Orin Nano Developer Kit with a JetPack 6 microSD card.

+
    +
  1. +

    Download SD card image on to your PC

    +

    On your PC, download the JetPack 6.0 GA image for the Jetson Orin Nano Developer Kit from the official JetPack 6.0 page or from the direct link button below.

    +

    Jetson Orin Nano Developer Kit
    JetPack 6.0 GA image

    +
  2. +
  3. +

    Use Balena Etcher to flash image to SD card

    +

    Insert your microSD card into your PC's SD card slot, and use Balena Etcher to flash the SD card with the image you just downloaded.

    +

    If you don't have Balena Etcher on your PC, download it from the official Balena site.

    +


    +
  4. +
+

7. Power on to start developing on JetPack 6

+
    +
  1. +

    Insert the JetPack 6 microSD card into the slot on the Jetson module

    +

    +
  2. +
  3. +

    Power on by plugging in the DC power supply

    +
  4. +
  5. +

    Complete the initial software setup (oem-config)

    +
  6. +
+
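+
Once oem-config completes, a quick sanity check of what you are running is to print the L4T release string, which on Jetson lives in /etc/nv_tegra_release; a JetPack 6.0 GA install should report R36 with REVISION 3.0.
+
    # Print the L4T release string (JetPack 6.0 GA corresponds to L4T R36.3)
    cat /etc/nv_tegra_release
+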

🎊 Congratulations!
+Your Jetson Orin Nano Developer Kit is set up with the JetPack 6 SD card, and you are ready to develop on JetPack 6.

+

Next step

+

NVMe SSD installation

+

Take a look at this page for installing an NVMe SSD and setting up Docker with it.

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/overrides/assets/style.css b/overrides/assets/style.css new file mode 100644 index 00000000..13fedc48 --- /dev/null +++ b/overrides/assets/style.css @@ -0,0 +1,879 @@ +:root { + --primary-color: #02007e; + --body-color: #f9f9f9; + --text-color: #636363; + --text-color-dark: #242738; + --white-color: #ffffff; + --light-color: #f8f9fa; + --font-family: Lato; +} + +body { + line-height: 1.5; + font-family: var(--font-family), sans-serif; + -webkit-font-smoothing: antialiased; + font-size: 17px; + color: var(--text-color); + background-color: var(--body-color) +} + +p { + color: var(--text-color); + font-size: 15px +} + +h1, +h2, +h3, +h4, +h5, +h6 { + color: var(--text-color-dark); + font-family: var(--font-family), sans-serif; + font-weight: 700; + line-height: 1.2 +} + +h1, +.h1 { + font-size: 45px +} + +h2, +.h2 { + font-size: 32px +} + +h3, +.h3 { + font-size: 26px +} + +h4, +.h4 { + font-size: 20px +} + +h5, +.h5 { + font-size: 18px +} + +h6, +.h6 { + font-size: 14px +} + +.btn { + font-size: 14px; + font-family: var(--font-family), sans-serif; + text-transform: uppercase; + padding: 16px 44px; + border-radius: 0; + font-weight: 600; + border: 0; + position: relative; + z-index: 1; + transition: .2s ease +} + +.btn:focus { + outline: 0; + box-shadow: none !important +} + +.btn:active { + box-shadow: none +} + +.btn-primary { + background: var(--primary-color); + color: var(--white-color) +} + +.btn-primary:active { + background: var(--primary-color) +} + +.btn-primary:hover { + background: var(--primary-color) +} + +.btn-primary:not(:disabled):not(.disabled).active, +.btn-primary:not(:disabled):not(.disabled):active, +.show>.btn-primary.dropdown-toggle { + color: var(--white-color); + background-color: var(--primary-color); + border-color: var(--primary-color) +} + +.inline-button { + line-height: .8rem !important; + padding: 5px 8px !important; + pointer-events: none; + margin-top: -5px +} + +.overflow-hidden { + overflow: hidden !important +} + +::-moz-selection { + background: var(--primary-color); + color: var(--white-color) +} + +::selection { + background: var(--primary-color); + color: var(--white-color) +} + +.preloader { + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: var(--white-color); + z-index: 999; + display: flex; + align-items: center; + justify-content: center +} + +ul { + list-style-type: none; + margin: 0; + padding-left: 0; + font-size: 15px +} + +ol { + padding-left: 20px; + font-size: 15px +} + +img { + vertical-align: middle; + border: 0 +} + +a, +a:hover, +a:focus { + text-decoration: none; + color: var(--primary-color) +} + +a:hover, +a:focus { + color: var(--primary-color) +} + +a, +button, +select { + cursor: pointer; + transition: .2s ease +} + +a:focus, +button:focus, +select:focus { + outline: 0 +} + +.slick-slide { + outline: 0 +} + +.section { + padding-top: 80px; + padding-bottom: 80px +} + +@media(max-width:768px) { + .section { + padding-top: 60px + } +} + +.section-sm { + padding-top: 60px; + padding-bottom: 60px +} + +@media(max-width:768px) { + .section-sm { + padding-top: 40px + } +} + +.section-title { + margin-bottom: 40px +} + +.bg-cover { + background-size: cover; + background-position: 50%; + background-repeat: no-repeat +} + +.border-primary { + border-color: #f2f2f2 !important +} + +pre { + padding: 20px +} + +.overlay { + position: relative +} + +.overlay::before { + position: absolute; + content: ''; + height: 100%; + width: 100%; + 
top: 0; + left: 0; + background: var(--primary-color); + opacity: .8 +} + +.outline-0 { + outline: 0 !important +} + +.d-unset { + display: unset !important +} + +.bg-primary { + background: var(--primary-color) !important +} + +.bg-white { + background-color: var(--white-color) !important +} + +.bg-light { + background-color: var(--light-color) !important +} + +.text-primary { + color: var(--primary-color) !important +} + +.text-color { + color: var(--text-color) !important +} + +.text-dark { + color: var(--text-color-dark) !important +} + +.text-white { + color: var(--white-color) !important +} + +.top-50 { + top: 50px +} + +.navbar { + padding: 0 +} + +@media(max-width:768px) { + .navbar { + padding: 10px 0 + } +} + +.navbar-brand img { + max-width: 100px; + margin-bottom: 0 +} + +.navbar .nav-item .nav-link { + text-transform: uppercase; + padding: 10px 15px !important; + font-size: 15px +} + +.navbar .dropdown:hover .dropdown-menu { + visibility: visible; + opacity: 1; + -webkit-transform: scaleX(1); + transform: scaleX(1) +} + +.navbar .dropdown-menu { + box-shadow: 0 3px 9px 0 rgba(0, 0, 0, .12); + padding: 15px 0; + border: 0; + top: 40px; + left: -35px; + border-radius: 0; + display: block; + visibility: hidden; + transition: .3s ease; + opacity: 0; + -webkit-transform: translateY(20px); + transform: translateY(20px); + background: var(--white-color) +} + +@media(max-width:768px) { + .navbar .dropdown-menu { + display: none; + opacity: 1; + visibility: visible; + -webkit-transform: translateY(0); + transform: translateY(0); + -webkit-transform-origin: unset; + transform-origin: unset + } +} + +.navbar .dropdown-menu.view { + visibility: visible !important; + opacity: 1; + -webkit-transform: translateY(0); + transform: translateY(0); + -webkit-transform-origin: top; + transform-origin: top +} + +@media(max-width:768px) { + .navbar .dropdown-menu.view { + display: block + } +} + +.navbar .dropdown-menu.show { + visibility: hidden +} + +@media(max-width:768px) { + .navbar .dropdown-menu.show { + visibility: visible; + display: block + } +} + +.navbar .dropdown-item { + position: relative; + color: var(--text-color-dark); + transition: .2s ease; + font-family: var(--font-family), sans-serif +} + +@media(max-width:768px) { + .navbar .dropdown-item { + text-align: center + } +} + +.navbar .dropdown-item:hover { + color: var(--primary-color); + background: 0 0 +} + +.lang-list { + background: var(--primary-color); + color: var(--white-color) +} + +.lang-list.dark { + color: var(--text-color-dark); + background: var(--white-color) +} + +.banner { + overflow: hidden +} + +.banner p { + font-size: 20px; + opacity: .8 +} + +.banner .nav-link.text-dark { + color: var(--white-color) !important +} + +.banner .nav-link.text-dark:hover { + color: var(--white-color) !important +} + +.banner .navbar-brand { + color: var(--white-color) !important +} + +#project-icon { + float: left; + height: 32px; + width: 32px +} + +#project-description { + margin: 0; + padding: 0 +} + +.ui-helper-hidden-accessible { + display: none +} + +.ui-menu { + background: var(--white-color); + padding: 5px 20px 20px; + right: 0 !important; + max-height: 200px; + overflow: hidden; + border-radius: 0 0 25px 25px; + z-index: 9999; + box-shadow: 0 13px 20px 0 rgba(0, 0, 0, .07) +} + +@media(max-width:575px) { + .ui-menu { + width: calc(100% - 30px) !important + } +} + +@media(min-width:576px) { + .ui-menu { + max-width: 510px !important + } +} + +@media(min-width:768px) { + .ui-menu { + max-width: 690px !important + } +} + 
+@media(min-width:992px) { + .ui-menu { + max-width: 610px !important + } +} + +@media(min-width:1200px) { + .ui-menu { + max-width: 730px !important + } +} + +.ui-menu-item a { + color: var(--text-color); + padding: 8px 0; + font-size: 15px +} + +.ui-menu-item a:hover { + color: var(--primary-color) +} + +.ui-menu-item:not(:last-child) { + border-bottom: 1px solid #e8e8e8 +} + +.ui-menu-item * { + display: none +} + +.ui-menu-item .ui-corner-all { + display: block +} + +.form-control { + height: 50px; + border-radius: 25px; + border: 0; + padding: 0 20px +} + +.form-control:focus { + border: 0; + box-shadow: none !important +} + +textarea.form-control { + height: 150px; + padding: 20px +} + +.icon { + font-size: 40px +} + +.shadow { + box-shadow: 0 5px 15px rgba(0, 0, 0, .07) !important; + transition: .3s ease +} + +.shadow-bottom { + box-shadow: 0 1px 0 rgba(12, 13, 14, .1), 0 1px 6px rgba(59, 64, 69, .1) +} + +.shadow:hover, +.shadow:focus { + box-shadow: 0 14px 25px rgba(0, 0, 0, .1) !important +} + +.content * { + margin-bottom: 20px +} + +.content img { + max-width: 100%; + height: auto; + margin: 0 auto 15px; + display: block; + text-align: center +} + +.content ul { + padding-left: 0; + margin-bottom: 20px +} + +.content ul li { + padding-left: 20px; + position: relative +} + +.content ul li::before { + position: absolute; + content: ''; + height: 8px; + width: 8px; + border-radius: 50%; + background: var(--primary-color); + opacity: .3; + left: 0; + top: 8px +} + +.list-styled li { + padding-left: 20px; + position: relative +} + +.list-styled li::before { + position: absolute; + content: ''; + height: 8px; + width: 8px; + border-radius: 50%; + background: var(--primary-color); + opacity: .3; + left: 0; + top: 17px +} + +.post-meta { + color: var(--text-color); + font-style: italic; + font-size: 14px +} + +blockquote { + font-size: 20px !important; + color: var(--text-color-dark); + padding: 20px 40px; + border-left: 2px solid var(--primary-color); + margin: 40px 0; + font-weight: 700; + background: var(--light-color) +} + +blockquote p { + margin-bottom: 0 !important +} + +.pagination { + justify-content: space-between +} + +.pagination a { + color: var(--primary-color) +} + +.pagination i { + font-size: 15px; + line-height: 1.8 +} + +#accordion i { + font-size: 14px; + line-height: 2 +} + +table { + text-align: left; + width: 100%; + max-width: 100%; + margin-bottom: 1rem; + border: 1px solid #dee2e6 +} + +table td, +table th { + padding: .75rem; + vertical-align: top; + border: 1px solid #dee2e6; + margin-bottom: 0 +} + +thead { + background: #ececec; + margin-bottom: 0 +} + +tbody { + background: #f8f8f8; + margin-bottom: 0 +} + +.notices { + margin: 2rem 0; + position: relative +} + +.notices p { + padding: 10px +} + +.notices p::before { + position: absolute; + top: 2px; + color: #fff; + font-family: themify; + font-weight: 900; + content: "\e717"; + left: 10px +} + +.notices.note p { + border-top: 30px solid #6ab0de; + background: #e7f2fa +} + +.notices.note p::after { + content: 'Note'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.tip p { + border-top: 30px solid #78c578; + background: #e6f9e6 +} + +.notices.tip p::after { + content: 'Tip'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.info p { + border-top: 30px solid #f0b37e; + background: #fff2db +} + +.notices.info p::after { + content: 'Info'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.warning p { + border-top: 30px solid 
#e06f6c; + background: #fae2e2 +} + +.notices.warning p::after { + content: 'Warning'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.sidebar { + background-color: var(--white-color); + position: sticky; + top: 50px; + margin-bottom: 30px; + padding: 40px 10px 20px +} + +.sidelist { + display: block +} + +li.sidelist>a { + margin-left: 20px; + margin-bottom: 10px; + display: block; + font-size: 20px +} + +li.sidelist li a { + margin: 0 +} + +.sidelist li.sidelist { + display: block +} + +.sidelist li.sidelist.active a { + color: var(--primary-color) +} + +.sidelist li.sidelist.active::before { + opacity: 1 +} + +.page-list li a { + display: none +} + +.page-list li ul a { + display: block +} + +.sidelist li a { + color: var(--text-color-dark); + display: block; + font-size: 15px; + font-weight: 500; + padding: 10px 0; + line-height: 1.4 +} + +p:empty, +p a:empty { + display: none !important +} + +pre { + display: block; + padding: 9.5px; + margin: 10px 0 +} + +code { + margin-bottom: 0 !important; + font-size: 100% +} + +.back-btn { + position: relative +} + +.back-btn::before { + position: absolute; + font-family: themify; + content: "\e6bc"; + font-size: 25px; + height: 30px; + width: 40px; + background-color: var(--white-color); + color: inherit; + text-align: right; + z-index: 1; + left: -5px; + top: -5px +} + +.ui-autocomplete-input { + border-bottom: 1px solid #d4d4d4 !important +} + +.ui-autocomplete-input.active { + border-bottom-left-radius: 0; + border-bottom-right-radius: 0 +} + +.search-icon { + position: absolute; + right: 20px; + top: 18px; + font-size: 14px +} + +i { + transition: .2s ease +} + +a:hover i { + color: var(--primary-color) +} + +pre code::-webkit-scrollbar { + height: 5px +} + +pre code::-webkit-scrollbar-track { + background: #000 +} + +pre code::-webkit-scrollbar-thumb { + background: #888 +} + +.code-tabs { + border: 1px solid #dee2e6; + overflow: hidden; + margin: 20px 0 +} + +.code-tabs .tab-content { + padding: 20px 15px; + margin-bottom: 0 +} + +.code-tabs .tab-content .tab-pane { + margin-bottom: 0 +} + +.code-tabs .nav-tabs { + margin-bottom: 0 +} + +.code-tabs .nav-tabs .nav-item { + padding-left: 0; + border-right: 1px solid #dee2e6 +} + +.code-tabs .nav-tabs .nav-item .nav-link { + text-decoration: none; + font-weight: 500; + border: 0; + margin-bottom: 0 +} + +.code-tabs .nav-tabs .nav-item::before { + display: none +} + +.code-tabs .nav-tabs .nav-item.active { + background: var(--primary-color) +} + +.code-tabs .nav-tabs .nav-item.active .nav-link { + color: var(--white-color) +} \ No newline at end of file diff --git a/overrides/home.html b/overrides/home.html new file mode 100644 index 00000000..60acb4ce --- /dev/null +++ b/overrides/home.html @@ -0,0 +1,661 @@ + +{% extends "main.html" %} +{% block extrahead %} + + + {{ super() }} +{% endblock %} +{% block tabs %} +{{ super() }} + + + +
+
+
+
+

Generative AI at the Edge

+

Bring generative AI to the world with NVIDIA® Jetson™

+ + Explore Tutorials + + +   Videos + + +
+
+ +
+
+
+
+ + +
+ +
+ + + + +
+
+
+ + +
+ + + +
+
+ + + +
+
+ + + +
+ Learn More +
+
+
+ + + + +
+ +
+ + + + + +{% endblock %} +{% block content %} + +{% endblock %} +{% block footer %} + +{% endblock %} diff --git a/overrides/home_navtop.html b/overrides/home_navtop.html new file mode 100644 index 00000000..674b3fc7 --- /dev/null +++ b/overrides/home_navtop.html @@ -0,0 +1,301 @@ + + + + +{% extends "base.html" %} +{% block tabs %} +{{ super() }} + + + + +
+
+
+
+ +
+
+

UP42 Python SDK

+

Access UP42's geospatial collections and processing workflows via Python.

+ + Get started + + + Go to GitHub + +
+
+
+
+ + +
+
+

+ + + + UP42 in Python +

+

Use UP42 via Python: order geospatial data, run analytic workflows, and + generate insights.

+
+
+

+ + + Python ecosystem +

+

Use UP42 together with your preferred Python libraries.

+
+
+

+ + Visualizations +

+

Interactive maps and visualizations. Ideal to use with Jupyter notebooks.

+
+
+ +
+
+
+
+ + + + +{% endblock %} +{% block content %}{% endblock %} +{% block footer %}{% endblock %} \ No newline at end of file diff --git a/overrides/images/JON-with-Gcube.png b/overrides/images/JON-with-Gcube.png new file mode 100644 index 00000000..590f3b1f Binary files /dev/null and b/overrides/images/JON-with-Gcube.png differ diff --git a/overrides/images/JON_Gen-AI-panels.png b/overrides/images/JON_Gen-AI-panels.png new file mode 100644 index 00000000..498dd306 Binary files /dev/null and b/overrides/images/JON_Gen-AI-panels.png differ diff --git a/overrides/images/JON_Gen-AI-panels_1024px.jpg b/overrides/images/JON_Gen-AI-panels_1024px.jpg new file mode 100644 index 00000000..30458936 Binary files /dev/null and b/overrides/images/JON_Gen-AI-panels_1024px.jpg differ diff --git a/overrides/images/JON_Gen-AI-panels_1024px.png b/overrides/images/JON_Gen-AI-panels_1024px.png new file mode 100644 index 00000000..58251f72 Binary files /dev/null and b/overrides/images/JON_Gen-AI-panels_1024px.png differ diff --git a/overrides/images/audio-generation-2560x1440.jpg b/overrides/images/audio-generation-2560x1440.jpg new file mode 100644 index 00000000..be0afe85 Binary files /dev/null and b/overrides/images/audio-generation-2560x1440.jpg differ diff --git a/overrides/images/distillation-2560x1440.jpg b/overrides/images/distillation-2560x1440.jpg new file mode 100644 index 00000000..3b62a41d Binary files /dev/null and b/overrides/images/distillation-2560x1440.jpg differ diff --git a/overrides/images/graph_llm-text-generation.png b/overrides/images/graph_llm-text-generation.png new file mode 100644 index 00000000..41153cc5 Binary files /dev/null and b/overrides/images/graph_llm-text-generation.png differ diff --git a/overrides/images/graph_llm-text-generation.svg b/overrides/images/graph_llm-text-generation.svg new file mode 100644 index 00000000..21196b45 --- /dev/null +++ b/overrides/images/graph_llm-text-generation.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/overrides/images/graph_vit-vision-transformers.svg b/overrides/images/graph_vit-vision-transformers.svg new file mode 100644 index 00000000..0daa5681 --- /dev/null +++ b/overrides/images/graph_vit-vision-transformers.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/overrides/images/graph_vlm-text-generation.svg b/overrides/images/graph_vlm-text-generation.svg new file mode 100644 index 00000000..86a86ffd --- /dev/null +++ b/overrides/images/graph_vlm-text-generation.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/overrides/images/icon_NanoDB_512px.png b/overrides/images/icon_NanoDB_512px.png new file mode 100644 index 00000000..505b6d09 Binary files /dev/null and b/overrides/images/icon_NanoDB_512px.png differ diff --git a/overrides/images/icon_NanoSAM.png b/overrides/images/icon_NanoSAM.png new file mode 100644 index 00000000..e9240b8d Binary files /dev/null and b/overrides/images/icon_NanoSAM.png differ diff --git a/overrides/images/icon_NanoSAM_512px.jpg b/overrides/images/icon_NanoSAM_512px.jpg new file mode 100644 index 00000000..19ab6d48 Binary files /dev/null and b/overrides/images/icon_NanoSAM_512px.jpg differ diff --git a/overrides/images/icon_NanoSAM_512px.png b/overrides/images/icon_NanoSAM_512px.png new file mode 100644 index 00000000..335f97fb Binary files /dev/null and b/overrides/images/icon_NanoSAM_512px.png differ diff --git a/overrides/images/icon_diffusion.png b/overrides/images/icon_diffusion.png new file mode 100644 index 00000000..f9cb33e6 Binary files /dev/null and 
b/overrides/images/icon_diffusion.png differ diff --git a/overrides/images/icon_diffusion_512px.jpg b/overrides/images/icon_diffusion_512px.jpg new file mode 100644 index 00000000..07b1d1df Binary files /dev/null and b/overrides/images/icon_diffusion_512px.jpg differ diff --git a/overrides/images/icon_diffusion_512px.png b/overrides/images/icon_diffusion_512px.png new file mode 100644 index 00000000..2c3dc683 Binary files /dev/null and b/overrides/images/icon_diffusion_512px.png differ diff --git a/overrides/images/icon_distillation.png b/overrides/images/icon_distillation.png new file mode 100644 index 00000000..6051b62e Binary files /dev/null and b/overrides/images/icon_distillation.png differ diff --git a/overrides/images/icon_text-generation_512px.jpg b/overrides/images/icon_text-generation_512px.jpg new file mode 100644 index 00000000..934b3c9d Binary files /dev/null and b/overrides/images/icon_text-generation_512px.jpg differ diff --git a/overrides/images/icon_text-generation_512px.png b/overrides/images/icon_text-generation_512px.png new file mode 100644 index 00000000..1435ac81 Binary files /dev/null and b/overrides/images/icon_text-generation_512px.png differ diff --git a/overrides/images/icon_text-generation_iso.png b/overrides/images/icon_text-generation_iso.png new file mode 100644 index 00000000..2458ec87 Binary files /dev/null and b/overrides/images/icon_text-generation_iso.png differ diff --git a/overrides/images/icon_vision-language-model.png b/overrides/images/icon_vision-language-model.png new file mode 100644 index 00000000..23a60e2d Binary files /dev/null and b/overrides/images/icon_vision-language-model.png differ diff --git a/overrides/images/icon_vision-language-model.png:Zone.Identifier b/overrides/images/icon_vision-language-model.png:Zone.Identifier new file mode 100644 index 00000000..bbbaf191 --- /dev/null +++ b/overrides/images/icon_vision-language-model.png:Zone.Identifier @@ -0,0 +1,3 @@ +[ZoneTransfer] +ZoneId=3 +HostUrl=https://www.figma.com/ diff --git a/overrides/images/icon_vision-language-model_512px.jpg b/overrides/images/icon_vision-language-model_512px.jpg new file mode 100644 index 00000000..7eb80d37 Binary files /dev/null and b/overrides/images/icon_vision-language-model_512px.jpg differ diff --git a/overrides/images/icon_vision-language-model_512px.png b/overrides/images/icon_vision-language-model_512px.png new file mode 100644 index 00000000..683e0eb6 Binary files /dev/null and b/overrides/images/icon_vision-language-model_512px.png differ diff --git a/overrides/images/iconnv_audio-generation_512x512.jpg b/overrides/images/iconnv_audio-generation_512x512.jpg new file mode 100644 index 00000000..dc6446b2 Binary files /dev/null and b/overrides/images/iconnv_audio-generation_512x512.jpg differ diff --git a/overrides/images/iconnv_distillation_512x512.jpg b/overrides/images/iconnv_distillation_512x512.jpg new file mode 100644 index 00000000..2ffdcd9a Binary files /dev/null and b/overrides/images/iconnv_distillation_512x512.jpg differ diff --git a/overrides/images/iconnv_image-generation_512x512.jpg b/overrides/images/iconnv_image-generation_512x512.jpg new file mode 100644 index 00000000..c4659307 Binary files /dev/null and b/overrides/images/iconnv_image-generation_512x512.jpg differ diff --git a/overrides/images/iconnv_llamaspeak_512x512.jpg b/overrides/images/iconnv_llamaspeak_512x512.jpg new file mode 100644 index 00000000..81a4fe0d Binary files /dev/null and b/overrides/images/iconnv_llamaspeak_512x512.jpg differ diff --git 
a/overrides/images/iconnv_nanodb_512x512.jpg b/overrides/images/iconnv_nanodb_512x512.jpg new file mode 100644 index 00000000..0e8d1340 Binary files /dev/null and b/overrides/images/iconnv_nanodb_512x512.jpg differ diff --git a/overrides/images/iconnv_nanosam_512x512.jpg b/overrides/images/iconnv_nanosam_512x512.jpg new file mode 100644 index 00000000..2a4e2456 Binary files /dev/null and b/overrides/images/iconnv_nanosam_512x512.jpg differ diff --git a/overrides/images/iconnv_text-generation_512x512.jpg b/overrides/images/iconnv_text-generation_512x512.jpg new file mode 100644 index 00000000..2feebd5c Binary files /dev/null and b/overrides/images/iconnv_text-generation_512x512.jpg differ diff --git a/overrides/images/iconnv_text-vision_512x512.jpg b/overrides/images/iconnv_text-vision_512x512.jpg new file mode 100644 index 00000000..dc04a1f6 Binary files /dev/null and b/overrides/images/iconnv_text-vision_512x512.jpg differ diff --git a/overrides/images/image-generation-2560x1440.jpg b/overrides/images/image-generation-2560x1440.jpg new file mode 100644 index 00000000..2b14d92b Binary files /dev/null and b/overrides/images/image-generation-2560x1440.jpg differ diff --git a/overrides/images/japanese-stable-diffusion.jpg b/overrides/images/japanese-stable-diffusion.jpg new file mode 100644 index 00000000..66fd4fdf Binary files /dev/null and b/overrides/images/japanese-stable-diffusion.jpg differ diff --git a/overrides/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png b/overrides/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png new file mode 100644 index 00000000..a442c0a1 Binary files /dev/null and b/overrides/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png differ diff --git a/overrides/images/llamaspeak-2560x1440.jpg b/overrides/images/llamaspeak-2560x1440.jpg new file mode 100644 index 00000000..dc921b5b Binary files /dev/null and b/overrides/images/llamaspeak-2560x1440.jpg differ diff --git a/overrides/images/nanodb-2560x1440.jpg b/overrides/images/nanodb-2560x1440.jpg new file mode 100644 index 00000000..a03e9e40 Binary files /dev/null and b/overrides/images/nanodb-2560x1440.jpg differ diff --git a/overrides/images/nanosam-2560x1440.jpg b/overrides/images/nanosam-2560x1440.jpg new file mode 100644 index 00000000..355ffd29 Binary files /dev/null and b/overrides/images/nanosam-2560x1440.jpg differ diff --git a/overrides/images/nvicon_llamaspeak_1200x1200.jpg b/overrides/images/nvicon_llamaspeak_1200x1200.jpg new file mode 100644 index 00000000..81a4fe0d Binary files /dev/null and b/overrides/images/nvicon_llamaspeak_1200x1200.jpg differ diff --git a/overrides/images/text-generation-2560x1440.jpg b/overrides/images/text-generation-2560x1440.jpg new file mode 100644 index 00000000..e97a466f Binary files /dev/null and b/overrides/images/text-generation-2560x1440.jpg differ diff --git a/overrides/images/text-vision-2560x1440.jpg b/overrides/images/text-vision-2560x1440.jpg new file mode 100644 index 00000000..45293d14 Binary files /dev/null and b/overrides/images/text-vision-2560x1440.jpg differ diff --git a/overrides/main.html b/overrides/main.html new file mode 100644 index 00000000..958e45c9 --- /dev/null +++ b/overrides/main.html @@ -0,0 +1,42 @@ + +{% extends "base.html" %} + + +{# +{% block announce %} + + + + + +{% endblock %} +#} + +{% block scripts %} + +{{ super() }} + + +{% endblock %} diff --git a/research.html b/research.html new file mode 100644 index 00000000..8135e13e --- /dev/null +++ 
b/research.html @@ -0,0 +1,2428 @@ + + + + + + + + + + + + + + + + + + + + + + + + Jetson AI Lab Research Group - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + + + + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Jetson AI Lab Research Group

+

The Jetson AI Lab Research Group is a global collective for advancing open-source Edge ML, open to anyone to join and collaborate with others from the community and leverage each other's work. Our goal is to use advanced AI for good in real-world applications, in accessible and responsible ways. By coordinating together as a group, we can keep up with the rapidly evolving pace of AI and more quickly deploy intelligent multimodal agents and autonomous robots into the field.

+

There are virtual meetings that anyone is welcome to join, offline discussion on the Jetson Projects forum, and guidelines for upstreaming open-source contributions.

+
+

Next Meeting - 5/29

+

The next team meeting is on Wednesday, May 29th at 9am PST - see the invite below or click here to join the meeting.

+ +
+

Topics of Interest

+

These are some initial research topics for us to discuss and investigate. This list will vary over time as experiments evolve and the SOTA progresses:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
• Controller LLMs for dynamic pipeline code generation• Fine-tuning LLM/VLM onboard Jetson AGX Orin 64GB
• HomeAssistant.io integration for smart home [1] [2]• Continuous multi-image VLM streaming and change detection
• Recurrent LLM architectures (Mamba, RWKV, etc.) [1]• Lightweight low-memory streaming ASR/TTS models
• Diffusion models for image processing and enhancement• Time Series Forecasting with Transformers [1] [2]
• Guidance, grammars, and guardrails for constrained output• Inline LLM function calling / plugins from API definitions
• ML DevOps, edge deployment, and orchestration• Robotics, IoT, and cyberphysical systems integration
+ +

New topics can be raised with the group either during the meetings or on the forums (people are welcome to work on whatever they want, of course).

+

Contribution Guidelines

+ + +

When experiments are successful, ideally the results will be packaged in such a way that they are easily reusable for others to integrate into their own projects:

+
+ + + + + + +
+ +

Ongoing technical discussions are encouraged to occur on the forums or GitHub Issues, with status updates on projects given during the meetings.

+

Meeting Schedule

+

We'll aim to meet monthly or bi-weekly as a team in virtual meetings that anyone is welcome to join and speak during. We'll discuss the latest updates and experiments that we want to explore. Please remain courteous to others during the calls. We'll stick around afterwards for anyone who has questions or didn't get the chance to be heard.

+
+

Wednesday, May 29 at 9am PST (5/29/24)

+ +
+ + +

The agenda will be listed here beforehand - post to the forum to add agenda items. The meetings will be recorded so anyone unable to attend live can watch them after.

+

Past Meetings

+
May 15, 2024 + +
+ +

Topics Covered:

+ + + +
+ +
May 1, 2024 + +
+ +
April 17, 2024 + +
+ +

Topics Covered:

+ + + +
+ +
April 3, 2024 + +
+ +

Active Members

+

Below are some of the sustaining members of the group who have been working on generative AI in edge computing:

+ + +
+
+
+
+ + + + Dustin Franklin, NVIDIA
+ + Principal Engineer | Pittsburgh, PA
+ (jetson-inference, jetson-containers) +
+
+
+
+
+
+ + + Nurgaliyev Shakhizat
+ + Institute of Smart Systems and AI | Kazakhstan
+ (Assistive Devices, Vision2Audio, HPC) +
+
+
+
+
+
+ + + Kris Kersey, Kersey Fabrications
+ + Embedded Software Engineer | Atlanta, GA
+ (The OASIS Project, AR/VR, 3D Fabrication) +
+
+
+
+
+
+ + + Johnny Núñez Cano
+ + PhD Researcher in CV/AI | Barcelona, Spain
+ (Recurrent LLMs, Pose & Behavior Analysis) +
+
+
+
+
+
+ + + Doruk Sönmez, Open Zeka
+ + Intelligent Video Analytics Engineer | Turkey
+ (NVIDIA DLI Certified Instructor, IVA, VLM) +
+
+
+
+
+
+ + + + Akash James, Spark Cognition
+ + AI Architect, UC Berkeley Researcher | Oakland
+ (NVIDIA AI Ambassador, Personal Assistants) +
+
+
+
+
+
+ + + Mieszko Syty, MS/1 Design
+ + AI/ML Engineer | Warsaw, Poland
+ (LLM, Home Assistants, ML DevOps) +
+
+
+
+
+
+ + + Jim Benson, JetsonHacks
+ + DIY Extraordinaire | Los Angeles, CA
+ (AI in Education, RACECAR/J) +
+
+
+
+
+
+ + + Chitoku Yato, NVIDIA
+ + Jetson AI DevTech | Santa Clara, CA
+ (JetBot, JetRacer, MinDisk, Containers) +
+
+
+
+
+
+ + Dana Sheahen, NVIDIA
+ + DLI Curriculum Developer | Santa Clara, CA
+ (AI in Education, Jetson AI Fundamentals) +
+
+
+
+
+
+ + + Sammy Ochoa, NVIDIA
+ + Jetson AI DevTech | Austin, TX
+ (Metropolis Microservices) +
+
+
+
+
+
+ + + John Welsh, NVIDIA
+ + (NanoOWL, NanoSAM, JetBot, JetRacer, torch2trt, trt_pose, Knowledge Distillation) + +
+
+
+
+
+ + + + Dave Niewinski
+ + Dave's Armoury | Waterloo, Ontario
+ (GLaDOS, Fetch, Offroad La-Z-Boy, KUKA Bot) +
+
+
+
+
+
+ + + + Gary Hilgemann, REBOTNIX
+ + CEO & AI Roboticist | Lünen, Germany
+ (GUSTAV, SPIKE, VisionTools, GenAI) +
+
+
+
+
+
+ + + + Elaine Wu, Seeed Studio
+ + AI & Robotics Partnerships | Shenzhen, China
+ (reComputer, YOLOv8, LocalJARVIS, Voice Bot) +
+
+
+
+
+
+ + Patty Delafuente, NVIDIA
+ + Data Scientist & UMBC PhD Student | MD
+ (AI in Education, DLI Robotics Teaching Kit) +
+
+
+
+
+
+ + + Song Han, MIT HAN Lab
+ + NVIDIA Research | Cambridge, MA
+ (Efficient Large Models, AWQ, VILA) +
+
+
+
+
+
+ + + Bryan Hughes, Mimzy AI
+ + Founder, Entrepreneur | SF Bay Area
+ (Multimodal Assistants, AI at the Edge) +
+
+
+
+
+
+ + + Tianqi Chen, CMU Catalyst
+ + OctoML, CTO | Seattle, WA
+ (MLC, Apache TVM, XGBoost) +
+
+
+
+
+
+ + + Michael Grüner, RidgeRun
+ + Team Lead / Architect | Costa Rica
+ (Embedded Vision & AI, Multimedia) +
+
+
+
+
+
+ + Jesse Flot, CMU Robotics Academy
+ + Co-Director | Pittsburgh, PA
+ (Applied AI & Robotics, Autonomy Foundations) +
+
+
+
+
+
+ + + + Paul DeCarlo, Microsoft
+ + Professor | University of Houston
+ (Azure IoT, Video Analytics, Microsoft JARVIS) +
+
+
+
+
+
+ + + Mike Hansen, Nabu Casa
+ + Voice Engineering Lead | Urbandale, Iowa
+ (Home Assistant, Piper TTS, Wyoming) +
+
+
+
+
+
+ + + Lakshantha Dissanayake, Ultralytics
+ + Embedded CV Engineer | Vancouver, BC
+ (YOLOv8, TensorRT, DeepStream) +
+
+
+
+
+
+ + + Kerry Shih, OStream
+ + Founder, CEO | Los Angeles, CA
+ (GenRunner, GenAI Nerds) +
+
+
+
+
+
+ + Ziad Youssfi, CMU
+ + ECE Professor | Pittsburgh, PA
+ (ML in Robotics & Embedded Systems) +
+
+
+
+
+
+ + + Walter Lucetti, Stereolabs
+ + Robotics & Vision Engineer | Paris, France
+ (MyzharBot, ROS2, GStreamer) +
+
+
+
+
+
+ + + + Raffaello Bonghi, NVIDIA
+ + AI & Robotics Engineer | Manchester, UK
+ (Nanosaur, Panther, jetson-stats) +
+
+
+
+
+
+ + Alvaro Costa, ANS Group
+ + AI & Robotics Lead | Epsom, UK
+ (TeknTrash, StipraPOD) +
+
+
+
+
+
+ + David Pearson, ConnectTech
+ + Embedded Systems Engineer | Ontario, CA
+ (Edge AI Systems, Vision/Language Models) +
+
+
+
+
+
+ + + + Jason Seawall, Numurus
+ + CEO | Seattle, WA
+ (NEPI, Edge AI & Automation) +
+
+
+
+
+
+ + + + Martin Cerven
+ + AI Researcher | Germany
+ (Autonomous Robotics, Voicecraft) +
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/research/images/Akash_James.jpg b/research/images/Akash_James.jpg new file mode 100644 index 00000000..b8ccc947 Binary files /dev/null and b/research/images/Akash_James.jpg differ diff --git a/research/images/Alvaro_Costa.jpg b/research/images/Alvaro_Costa.jpg new file mode 100644 index 00000000..74848641 Binary files /dev/null and b/research/images/Alvaro_Costa.jpg differ diff --git a/research/images/Bryan_Hughes.jpg b/research/images/Bryan_Hughes.jpg new file mode 100644 index 00000000..c65efefa Binary files /dev/null and b/research/images/Bryan_Hughes.jpg differ diff --git a/research/images/Chitoku_Yato.jpg b/research/images/Chitoku_Yato.jpg new file mode 100644 index 00000000..33d91417 Binary files /dev/null and b/research/images/Chitoku_Yato.jpg differ diff --git a/research/images/Dana_Sheahen.jpg b/research/images/Dana_Sheahen.jpg new file mode 100644 index 00000000..12b6e397 Binary files /dev/null and b/research/images/Dana_Sheahen.jpg differ diff --git a/research/images/Dave_Niewinski.jpg b/research/images/Dave_Niewinski.jpg new file mode 100644 index 00000000..ffa5a7a0 Binary files /dev/null and b/research/images/Dave_Niewinski.jpg differ diff --git a/research/images/David_Pearson.jpg b/research/images/David_Pearson.jpg new file mode 100644 index 00000000..830eedc0 Binary files /dev/null and b/research/images/David_Pearson.jpg differ diff --git a/research/images/Doruk_Sonmez.png b/research/images/Doruk_Sonmez.png new file mode 100644 index 00000000..a1dd08cc Binary files /dev/null and b/research/images/Doruk_Sonmez.png differ diff --git a/research/images/Dustin_Franklin.jpg b/research/images/Dustin_Franklin.jpg new file mode 100644 index 00000000..702e9495 Binary files /dev/null and b/research/images/Dustin_Franklin.jpg differ diff --git a/research/images/Elaine_Wu.jpg b/research/images/Elaine_Wu.jpg new file mode 100644 index 00000000..f7a589b0 Binary files /dev/null and b/research/images/Elaine_Wu.jpg differ diff --git a/research/images/Gary_Hilgemann.jpg b/research/images/Gary_Hilgemann.jpg new file mode 100644 index 00000000..faea2b38 Binary files /dev/null and b/research/images/Gary_Hilgemann.jpg differ diff --git a/research/images/Jason_Seawall.jpg b/research/images/Jason_Seawall.jpg new file mode 100644 index 00000000..d5f8e662 Binary files /dev/null and b/research/images/Jason_Seawall.jpg differ diff --git a/research/images/Jesse_Flot.jpg b/research/images/Jesse_Flot.jpg new file mode 100644 index 00000000..8a3b36ac Binary files /dev/null and b/research/images/Jesse_Flot.jpg differ diff --git a/research/images/Jim_Benson.png b/research/images/Jim_Benson.png new file mode 100644 index 00000000..3054da91 Binary files /dev/null and b/research/images/Jim_Benson.png differ diff --git a/research/images/John_Welsh.jpg b/research/images/John_Welsh.jpg new file mode 100644 index 00000000..d7f3cb96 Binary files /dev/null and b/research/images/John_Welsh.jpg differ diff --git a/research/images/Johnny_Cano.jpg b/research/images/Johnny_Cano.jpg new file mode 100644 index 00000000..f59569f7 Binary files /dev/null and b/research/images/Johnny_Cano.jpg differ diff --git a/research/images/Kerry_Shih.jpg b/research/images/Kerry_Shih.jpg new file mode 100644 index 00000000..4019efea Binary files /dev/null and b/research/images/Kerry_Shih.jpg differ diff --git a/research/images/Kris_Kersey.jpg b/research/images/Kris_Kersey.jpg new file mode 100644 index 00000000..252d28df Binary files /dev/null and 
b/research/images/Kris_Kersey.jpg differ diff --git a/research/images/Lakshantha_Dissanayake.jpg b/research/images/Lakshantha_Dissanayake.jpg new file mode 100644 index 00000000..265f77e9 Binary files /dev/null and b/research/images/Lakshantha_Dissanayake.jpg differ diff --git a/research/images/Martin_Cerven.jpg b/research/images/Martin_Cerven.jpg new file mode 100644 index 00000000..d8d913c9 Binary files /dev/null and b/research/images/Martin_Cerven.jpg differ diff --git a/research/images/Michael_Gruner.jpg b/research/images/Michael_Gruner.jpg new file mode 100644 index 00000000..245a3c7f Binary files /dev/null and b/research/images/Michael_Gruner.jpg differ diff --git a/research/images/Mieszko_Syty.jpg b/research/images/Mieszko_Syty.jpg new file mode 100644 index 00000000..6918cfcb Binary files /dev/null and b/research/images/Mieszko_Syty.jpg differ diff --git a/research/images/Mike_Hansen.jpg b/research/images/Mike_Hansen.jpg new file mode 100644 index 00000000..aa025b65 Binary files /dev/null and b/research/images/Mike_Hansen.jpg differ diff --git a/research/images/Patty_Delafuente.jpg b/research/images/Patty_Delafuente.jpg new file mode 100644 index 00000000..a7435091 Binary files /dev/null and b/research/images/Patty_Delafuente.jpg differ diff --git a/research/images/Paul_DeCarlo.jpg b/research/images/Paul_DeCarlo.jpg new file mode 100644 index 00000000..00737c3b Binary files /dev/null and b/research/images/Paul_DeCarlo.jpg differ diff --git a/research/images/Raffaello_Bonghi.jpg b/research/images/Raffaello_Bonghi.jpg new file mode 100644 index 00000000..f8600e75 Binary files /dev/null and b/research/images/Raffaello_Bonghi.jpg differ diff --git a/research/images/Sammy_Ochoa.jpg b/research/images/Sammy_Ochoa.jpg new file mode 100644 index 00000000..68218789 Binary files /dev/null and b/research/images/Sammy_Ochoa.jpg differ diff --git a/research/images/Shakhizat_Nurgaliyev.jpg b/research/images/Shakhizat_Nurgaliyev.jpg new file mode 100644 index 00000000..69640de6 Binary files /dev/null and b/research/images/Shakhizat_Nurgaliyev.jpg differ diff --git a/research/images/Song_Han.jpg b/research/images/Song_Han.jpg new file mode 100644 index 00000000..86d61250 Binary files /dev/null and b/research/images/Song_Han.jpg differ diff --git a/research/images/Tianqi_Chen.jpg b/research/images/Tianqi_Chen.jpg new file mode 100644 index 00000000..21cf437b Binary files /dev/null and b/research/images/Tianqi_Chen.jpg differ diff --git a/research/images/Walter_Lucetti.jpg b/research/images/Walter_Lucetti.jpg new file mode 100644 index 00000000..f9dba28f Binary files /dev/null and b/research/images/Walter_Lucetti.jpg differ diff --git a/research/images/Ziad_Youssfi.jpg b/research/images/Ziad_Youssfi.jpg new file mode 100644 index 00000000..555c73c8 Binary files /dev/null and b/research/images/Ziad_Youssfi.jpg differ diff --git a/research/images/github.png b/research/images/github.png new file mode 100644 index 00000000..d6ec7c80 Binary files /dev/null and b/research/images/github.png differ diff --git a/research/images/hackster.png b/research/images/hackster.png new file mode 100644 index 00000000..f0cf8254 Binary files /dev/null and b/research/images/hackster.png differ diff --git a/research/images/linkedin.png b/research/images/linkedin.png new file mode 100644 index 00000000..db3b973f Binary files /dev/null and b/research/images/linkedin.png differ diff --git a/research/images/robots_0.jpg b/research/images/robots_0.jpg new file mode 100644 index 00000000..11225d74 Binary files /dev/null and 
b/research/images/robots_0.jpg differ diff --git a/research/images/robots_1.jpg b/research/images/robots_1.jpg new file mode 100644 index 00000000..fe2a2825 Binary files /dev/null and b/research/images/robots_1.jpg differ diff --git a/research/images/youtube.png b/research/images/youtube.png new file mode 100644 index 00000000..715ffcd5 Binary files /dev/null and b/research/images/youtube.png differ diff --git a/research/invites/Jetson AI Lab Research Group (41724).ics b/research/invites/Jetson AI Lab Research Group (41724).ics new file mode 100644 index 00000000..3b4a4ef8 --- /dev/null +++ b/research/invites/Jetson AI Lab Research Group (41724).ics @@ -0,0 +1,698 @@ +BEGIN:VCALENDAR +PRODID:-//Microsoft Corporation//Outlook 16.0 MIMEDIR//EN +VERSION:2.0 +METHOD:REQUEST +X-MS-OLK-FORCEINSPECTOROPEN:TRUE +BEGIN:VTIMEZONE +TZID:Eastern Standard Time +BEGIN:STANDARD +DTSTART:16011104T020000 +RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11 +TZOFFSETFROM:-0400 +TZOFFSETTO:-0500 +END:STANDARD +BEGIN:DAYLIGHT +DTSTART:16010311T020000 +RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3 +TZOFFSETFROM:-0500 +TZOFFSETTO:-0400 +END:DAYLIGHT +END:VTIMEZONE +BEGIN:VEVENT +CLASS:PUBLIC +CREATED:20240404T032045Z +DESCRIPTION:Agenda will be posted to: https://www.jetson-ai-lab.com/resear + ch.html#meeting-schedule\nTo add topics post to: https://forums.developer + .nvidia.com/t/jetson-ai-lab-research-group-meeting-on-4-17/288491\n_______ + _________________________________________________________________________\ + nMicrosoft Teams Need help? + \nJoin the meeting now \nMeeting ID: 267 767 899 + 278 \nPasscode: nA8Yiw \n________________________________\n\nDial-in by ph + one \n+1 949-570-1120\,\,507782695# Unite + d States\, Irvine \nFind a local number \nPhone conference I + D: 507 782 695# \nJoin on a video conferencing device \nTenant key: teams@ + vc.nvidia.com \nVideo ID: 117 259 101 9 \nMore info \nFor organizers: Meeting options | Reset dial-in PIN \n____________________________________________________ + ____________________________\n \n +DTEND;TZID="Eastern Standard Time":20240417T130000 +DTSTAMP:20240404T032045Z +DTSTART;TZID="Eastern Standard Time":20240417T120000 +LAST-MODIFIED:20240404T032045Z +LOCATION:Microsoft Teams Meeting +ORGANIZER;CN="Dustin Franklin":mailto:dustinf@nvidia.com +PRIORITY:5 +SEQUENCE:0 +SUMMARY;LANGUAGE=en-us:Jetson AI Lab Research Group (4/17/24) +TRANSP:OPAQUE +UID:040000008200E00074C5B7101A82E00800000000607E63361B86DA01000000000000000 + 010000000BB2295284EE52B47943FB644ADF8B55A +X-ALT-DESC;FMTTYPE=text/html:< + style>

Join us for the ina + ugural team meeting of the Jetson AI Lab Research Group!&nb + sp\; \;

 \;

On the agenda\, we will discuss the latest updates\, G + TC recap\, and experiments that we want to explore.

 \;

Anyone is welcome to j + oin and speak. Please remain courteous of others during the call\, and sta + y muted if you aren’\;t speaking.  + \; We’\;ll stick around after for anyone who has questions or + didn't get the chance to be heard. \; < + /span>This meeting will be recorded and posted afterwards for anyone unabl + e to attend.

 \;

< + div style='margin-bottom:.25in\;overflow:hidden'>

_______________________________________________ + _________________________________

Mic + rosoft Teams Need help?

Meeting ID: 223 573 467 074

Passcode: 6ybv + Cg


Dial-in by phone

+1 949-570-1120\,\,4 + 79114164# United States\, Irvine

Phone conference ID: 479 114 164#

Join on a v + ideo conferencing device

Tenant key: teams@vc.nvidia.com +

Video ID: 117 895 093 3< + span style='font-family:"Segoe UI"\,sans-serif\;mso-fareast-font-family:"T + imes New Roman"\;color:#242424'>

For organizers: Meeting options | Reset dial-in PIN +

____________________________________ + ____________________________________________

 \;

+X-MICROSOFT-CDO-BUSYSTATUS:BUSY +X-MICROSOFT-CDO-IMPORTANCE:1 +X-MICROSOFT-DISALLOW-COUNTER:FALSE +X-MS-OLK-AUTOFILLLOCATION:FALSE +X-MS-OLK-CONFTYPE:0 +BEGIN:VALARM +TRIGGER:-PT15M +ACTION:DISPLAY +DESCRIPTION:Reminder +END:VALARM +END:VEVENT +END:VCALENDAR diff --git a/research/invites/Jetson AI Lab Research Group (5124).ics b/research/invites/Jetson AI Lab Research Group (5124).ics new file mode 100644 index 00000000..a1a23e9b --- /dev/null +++ b/research/invites/Jetson AI Lab Research Group (5124).ics @@ -0,0 +1,700 @@ +BEGIN:VCALENDAR +PRODID:-//Microsoft Corporation//Outlook 16.0 MIMEDIR//EN +VERSION:2.0 +METHOD:REQUEST +X-MS-OLK-FORCEINSPECTOROPEN:TRUE +BEGIN:VTIMEZONE +TZID:Eastern Standard Time +BEGIN:STANDARD +DTSTART:16011104T020000 +RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11 +TZOFFSETFROM:-0400 +TZOFFSETTO:-0500 +END:STANDARD +BEGIN:DAYLIGHT +DTSTART:16010311T020000 +RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3 +TZOFFSETFROM:-0500 +TZOFFSETTO:-0400 +END:DAYLIGHT +END:VTIMEZONE +BEGIN:VEVENT +CLASS:PUBLIC +CREATED:20240421T213734Z +DESCRIPTION:Agenda will be posted to: https://www.jetson-ai-lab.com/resear + ch.html#meeting-schedule\n\n\nTo add topics to the agenda\, please post th + em here: https://forums.developer.nvidia.com/t/jetson-ai-lab-research-gro + up-meeting-on-5-1\n \n____________________________________________________ + ____________________________\nMicrosoft Teams Need help? \nJoin the meeting now \nMeeting ID: 216 392 143 478 \nPasscode: BGqJY2 \n____________________ + ____________\n\nDial-in by phone \n+1 949-570-1120\,\,87040341# United States\, Irvine \nFind a local number \nPhone conference ID: 870 403 41# \nJoin on a video conferencing de + vice \nTenant key: teams@vc.nvidia.com \nVideo ID: 112 441 609 6 \nMore in + fo \nFor organizers: Me + eting options | Reset dial-in PIN \n___________________________ + _____________________________________________________\n \n +DTEND;TZID="Eastern Standard Time":20240501T130000 +DTSTAMP:20240421T213734Z +DTSTART;TZID="Eastern Standard Time":20240501T120000 +LAST-MODIFIED:20240421T213734Z +LOCATION:Microsoft Teams Meeting +ORGANIZER;CN="Dustin Franklin":mailto:dustinf@nvidia.com +PRIORITY:5 +SEQUENCE:0 +SUMMARY;LANGUAGE=en-us:Jetson AI Lab Research Group (5/1/24) +TRANSP:OPAQUE +UID:040000008200E00074C5B7101A82E00800000000D04B902B1294DA01000000000000000 + 01000000092A6B86684F8BC439AB6FF690BFD3CDD +X-ALT-DESC;FMTTYPE=text/html:

Agenda will be posted to: \; https://www.jetson-ai-lab.com/resear + ch.html#meeting-schedule
< + ![if !supportLineBreakNewLine]>

To add topics to the agenda\ + , please post them here: \; https://forums.developer.nvidia.com/t/jetson-ai-lab-research + -group-meeting-on-5-1

 \;

__________________________________ + ______________________________________________

Microsoft Teams Need help?

Meeting ID: 216 392 143 478

Passcode: < + /span>BGqJY2 +


Dial-in by phone

+1 949-5 + 70-1120\,\,87040341# United States\, Irvine

+ Phone conference ID: 870 403 41# +

Jo + in on a video conferencing device

Tenant key: teams@vc.nvidia.com

Video ID: 112 441 609 6 +

____________________________________________________________________ + ____________

&nbs + p\;

+X-MICROSOFT-CDO-BUSYSTATUS:BUSY +X-MICROSOFT-CDO-IMPORTANCE:1 +X-MICROSOFT-DISALLOW-COUNTER:FALSE +X-MS-OLK-AUTOFILLLOCATION:FALSE +X-MS-OLK-CONFTYPE:0 +BEGIN:VALARM +TRIGGER:-PT15M +ACTION:DISPLAY +DESCRIPTION:Reminder +END:VALARM +END:VEVENT +END:VCALENDAR diff --git a/research/invites/Jetson AI Lab Research Group (51524).ics b/research/invites/Jetson AI Lab Research Group (51524).ics new file mode 100644 index 00000000..c4708b61 --- /dev/null +++ b/research/invites/Jetson AI Lab Research Group (51524).ics @@ -0,0 +1,244 @@ +BEGIN:VCALENDAR +PRODID:-//Microsoft Corporation//Outlook 16.0 MIMEDIR//EN +VERSION:2.0 +METHOD:REQUEST +X-MS-OLK-FORCEINSPECTOROPEN:TRUE +BEGIN:VTIMEZONE +TZID:Eastern Standard Time +BEGIN:STANDARD +DTSTART:16011104T020000 +RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11 +TZOFFSETFROM:-0400 +TZOFFSETTO:-0500 +END:STANDARD +BEGIN:DAYLIGHT +DTSTART:16010311T020000 +RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3 +TZOFFSETFROM:-0500 +TZOFFSETTO:-0400 +END:DAYLIGHT +END:VTIMEZONE +BEGIN:VEVENT +ATTENDEE;CN="Chitoku Yato";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:cyato@nvid + ia.com +ATTENDEE;CN="Dana Sheahen";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:dsheahen@n + vidia.com +ATTENDEE;CN="Patricia Delafuente";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:pde + lafuente@nvidia.com +ATTENDEE;CN="Sammy Ochoa";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:sochoa@nvid + ia.com +ATTENDEE;CN="Nigel Nelson";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:nigeln@nvi + dia.com +ATTENDEE;CN="Chen Su";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:chesu@nvidia.co + m +ATTENDEE;CN="Raffaello Bonghi";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:rbongh + i@nvidia.com +ATTENDEE;CN="Lynette Farinas";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:lfarina + s@nvidia.com +ATTENDEE;CN="Mieszko Syty";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:mieszko@ms + 1design.pl +ATTENDEE;CN="Michael Hansen";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:michael. + hansen@nabucasa.com +ATTENDEE;CN="Michael Gruner";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:michael. + gruner@ridgerun.com +ATTENDEE;CN="Jason Seawall";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:jseawall@ + numurus.com +ATTENDEE;CN="Kris Kersey";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:kris@kersey + fabrications.com +ATTENDEE;CN=JetsonHacks;ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:jim@jetsonhac + ks.com +ATTENDEE;CN="David Niewinski";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:dave@ar + mourylabs.com +ATTENDEE;CN="Elaine Wu";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:elaine.wu@see + ed.cc +ATTENDEE;CN="Bryan Hughes";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:bryan@mimz + y.ai +ATTENDEE;CN="Jesse Flot";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:jbflot@nrec. 
+ ri.cmu.edu +ATTENDEE;CN="Ziad Youssfi";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:zyoussfi@a + ndrew.cmu.edu +ATTENDEE;CN="Paul DeCarlo";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:pdecarlo@m + icrosoft.com +ATTENDEE;CN=walter.lucetti@stereolabs.com;ROLE=OPT-PARTICIPANT;RSVP=TRUE:ma + ilto:walter.lucetti@stereolabs.com +ATTENDEE;CN="Mark Neville";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:mark.mnevi + lle@gmail.com +ATTENDEE;CN=paulgeorgesavluc@gmail.com;ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailt + o:paulgeorgesavluc@gmail.com +ATTENDEE;CN=Connect@openqquantify.com;ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto + :Connect@openqquantify.com +ATTENDEE;CN="Jairo Torregrosa";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mailto:torreg + rosa.ja@gmail.com +ATTENDEE;CN="Senthil Kumar Ravichandran";ROLE=OPT-PARTICIPANT;RSVP=TRUE:mai + lto:senthilr@nvidia.com +CLASS:PUBLIC +CREATED:20240502T143843Z +DESCRIPTION:Agenda will be posted to: https://www.jetson-ai-lab.com/resear + ch.html#meeting-schedule\n\n\n\nTo add topics to the agenda\, please post + them here: https://forums.developer.nvidia.com/t/jetson-ai-lab-research-g + roup-meeting-on-5-15\n\n \n\n_____________________________________________ + ___________________________________\n\nMicrosoft Teams Need help? \n\nJoin the meeting now \n\nMeeting ID: 221 065 454 301 \n\nPasscode: dVXWcf \n\n___ + _____________________________\n\nDial-in by phone \n\n+1 949-570-1120\,\,4 + 84543941# United States\, Irvine \n\nFind + a local number \n\nPhone conference ID: 484 543 941# \n\nJo + in on a video conferencing device \n\nTenant key: teams@vc.nvidia.com \n\n + Video ID: 114 432 168 9 \n\nMore info \n\nFor organizers: Meeting options | Reset dial-in PIN \n\n______________________________________________________________ + __________________\n\n \n\n +DTEND;TZID="Eastern Standard Time":20240515T130000 +DTSTAMP:20240502T143803Z +DTSTART;TZID="Eastern Standard Time":20240515T120000 +LAST-MODIFIED:20240502T143843Z +LOCATION:Microsoft Teams Meeting +ORGANIZER;CN="Dustin Franklin":mailto:dustinf@nvidia.com +PRIORITY:5 +SEQUENCE:0 +SUMMARY;LANGUAGE=en-us:Jetson AI Lab Research Group (5/15/24) +TRANSP:OPAQUE +UID:040000008200E00074C5B7101A82E008000000003047B3AA7A9CDA01000000000000000 + 01000000051FD82525AD928468120EA9BB47A7474 +X-ALT-DESC;FMTTYPE=text/html: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

RAM Optimization

+

Running an LLM requires a large amount of RAM.

+

Especially if you are on a Jetson Orin Nano, which has only 8GB of RAM, it is crucial to leave as much memory as possible available for models.

+

Here we share a couple of ways to optimize system RAM usage.

+

Disabling the Desktop GUI

+

If you use your Jetson remotely through SSH, you can disable the Ubuntu desktop GUI.
+This will free up the extra memory that the window manager and desktop use (around ~800MB for Unity/GNOME).

+

You can disable the desktop temporarily, run commands in the console, and then restart the desktop when desired:

+
$ sudo init 3     # stop the desktop
+# log your user back into the console (Ctrl+Alt+F1, F2, etc.)
+$ sudo init 5     # restart the desktop
+
+

If you wish to make this persistent across reboots, you can use the following commands to change the boot-up behavior:

+
    +
  • +

    To disable desktop on boot

    +
    sudo systemctl set-default multi-user.target
    +
    +
  • +
  • +

    To enable desktop on boot

    +
    sudo systemctl set-default graphical.target
    +
    +
  • +
+
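You can confirm which boot target is currently set with standard systemd tooling (a quick check; the commands above are what actually change it):

$ systemctl get-default   # prints graphical.target or multi-user.target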

Disabling misc services

+
sudo systemctl disable nvargus-daemon.service
+
+
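nvargus-daemon is the camera capture service, so this assumes you aren't using CSI cameras; you can undo it with sudo systemctl enable nvargus-daemon.service. To gauge how much memory these optimizations free up, compare before and after:

$ free -h   # compare the 'available' column before and after disabling services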

Mounting Swap

+

If you're building containers or working with large models, it's advisable to mount swap (typically sized to match the amount of memory on the board). Run these commands to disable ZRAM and create a swap file:

+
+

If you have NVMe SSD storage available, it's preferred to allocate the swap file on the NVMe SSD.

+
+
sudo systemctl disable nvzramconfig
+sudo fallocate -l 16G /ssd/16GB.swap
+sudo mkswap /ssd/16GB.swap
+sudo swapon /ssd/16GB.swap
+
+

Then add the following line to the end of /etc/fstab to make the change persistent:

+
/ssd/16GB.swap  none  swap  sw 0  0
+
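After rebooting, you can verify that the swap is active (a minimal check, assuming the 16GB swap file created above):

$ swapon --show   # should list /ssd/16GB.swap
$ free -h         # the Swap: row should reflect the new size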
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tips_ssd-docker.html b/tips_ssd-docker.html new file mode 100644 index 00000000..c666c758 --- /dev/null +++ b/tips_ssd-docker.html @@ -0,0 +1,2090 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + 🔖 SSD + Docker - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tips - SSD + Docker

+

Once you have set up your Jetson by flashing the latest Jetson Linux (L4T) BSP, or by flashing the SD card with the whole JetPack image, and before embarking on testing all the great generative AI applications using jetson-containers, you will want to make sure you have ample storage space for all the containers and models you will download.

+

We are going to show how you can install an NVMe SSD on your Jetson and set it up for Docker.

+

SSD

+

Physical installation

+
    +
  1. Unplug power and any peripherals from the Jetson developer kit.
  2. +
  3. Physically install an NVMe SSD card on the carrier board of your Jetson developer kit, making sure to properly seat the connector and secure with the screw.
  4. +
  5. Reconnect any peripherals, and then reconnect the power supply to turn on the Jetson developer kit.
  6. +
  7. +

Once the system is up, verify that your Jetson identifies a new memory controller on the PCI bus:

    +
    lspci
    +
    +

    The output should look like the following:

    +
    0007:01:00.0 Non-Volatile memory controller: Marvell Technology Group Ltd. Device 1322 (rev 02)
    +
    +
  8. +
+

Format and set up auto-mount

+
    +
  1. +

    Run lsblk to find the device name.

    +
    lsblk
    +
    +

    The output should look like the following:

    +
    NAME         MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
    +loop0          7:0    0    16M  1 loop 
    +mmcblk1      179:0    0  59.5G  0 disk 
    +├─mmcblk1p1  179:1    0    58G  0 part /
    +├─mmcblk1p2  179:2    0   128M  0 part 
    +├─mmcblk1p3  179:3    0   768K  0 part 
    +├─mmcblk1p4  179:4    0  31.6M  0 part 
    +├─mmcblk1p5  179:5    0   128M  0 part 
    +├─mmcblk1p6  179:6    0   768K  0 part 
    +├─mmcblk1p7  179:7    0  31.6M  0 part 
    +├─mmcblk1p8  179:8    0    80M  0 part 
    +├─mmcblk1p9  179:9    0   512K  0 part 
    +├─mmcblk1p10 179:10   0    64M  0 part 
    +├─mmcblk1p11 179:11   0    80M  0 part 
    +├─mmcblk1p12 179:12   0   512K  0 part 
    +├─mmcblk1p13 179:13   0    64M  0 part 
    +└─mmcblk1p14 179:14   0 879.5M  0 part 
    +zram0        251:0    0   1.8G  0 disk [SWAP]
    +zram1        251:1    0   1.8G  0 disk [SWAP]
    +zram2        251:2    0   1.8G  0 disk [SWAP]
    +zram3        251:3    0   1.8G  0 disk [SWAP]
    +nvme0n1      259:0    0 238.5G  0 disk 
    +
    +

    Identify the device corresponding to your SSD. In this case, it is nvme0n1.

    +
  2. +
  3. +

    Format the SSD, create a mount point, and mount it to the filesystem.

    +
    sudo mkfs.ext4 /dev/nvme0n1
    +
    +
    +

    You can choose any name for the mount point directory. We use /ssd here, but in jetson-containers' setup.md documentation, /mnt is used.

    +
    +
    sudo mkdir /ssd
    +
    +
    sudo mount /dev/nvme0n1 /ssd
    +
    +
  4. +
  5. +

    In order to ensure that the mount persists after boot, add an entry to the fstab file:

    +

    First, identify the UUID for your SSD:

    +
    lsblk -f
    +
    +

    Then, add a new entry to the fstab file:

    +
    sudo vi /etc/fstab
    +
    +

Insert the following line, replacing the UUID with the value found from lsblk -f (a scripted alternative is sketched after this list):

    +
    UUID=************-****-****-****-******** /ssd/ ext4 defaults 0 2
    +
    +
  6. +
  7. +

    Finally, change the ownership of the /ssd directory.

    +
    sudo chown ${USER}:${USER} /ssd
    +
    +
  8. +
+
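As an alternative to editing /etc/fstab by hand, the entry can be appended in one step (a sketch assuming your SSD is /dev/nvme0n1 and was formatted as above):

echo "UUID=$(lsblk -no UUID /dev/nvme0n1) /ssd ext4 defaults 0 2" | sudo tee -a /etc/fstab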

Docker

+
    +
  1. +

    Install nvidia-container package.

    +
    +

    Note: If you used an NVIDIA-supplied SD card image to flash your SD card, all necessary JetPack components (including nvidia-containers) and Docker are already pre-installed, so this step can be skipped.

    +
    +
    sudo apt update
    +sudo apt install -y nvidia-container
    +
    +
    +

    JetPack 6.0 DP users

    +

If you flash Jetson Linux (L4T) R36.2 (JetPack 6.0 DP) on your Jetson using SDK Manager and install nvidia-container using apt, note that on JetPack 6.0 this package no longer automatically installs Docker.

    +

    Therefore, you need to run the following to manually install Docker and set it up.

    +
    sudo apt update
    +sudo apt install -y nvidia-container curl
    +curl https://get.docker.com | sh && sudo systemctl --now enable docker
    +sudo nvidia-ctk runtime configure --runtime=docker
    +
    +
    +
  2. +
  3. +

    Restart the Docker service and add your user to the docker group, so that you don't need to use the command with sudo.

    +
    sudo systemctl restart docker
    +sudo usermod -aG docker $USER
    +newgrp docker
    +
    +
  4. +
  5. +

    Add default runtime in /etc/docker/daemon.json

    +
    sudo vi /etc/docker/daemon.json
    +
    +

Insert the "default-runtime": "nvidia" line as follows:

    +
    {
    +    "runtimes": {
    +        "nvidia": {
    +            "path": "nvidia-container-runtime",
    +            "runtimeArgs": []
    +        }
    +    },
    +    "default-runtime": "nvidia"
    +}
    +
    +
  6. +
  7. +

    Restart Docker

    +
    sudo systemctl daemon-reload && sudo systemctl restart docker
    +
    +
  8. +
+
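Before moving on, you can confirm that the default runtime took effect (the same check appears in the final verification below):

docker info | grep 'Default Runtime'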

Migrate Docker directory to SSD

+

Now that the SSD is installed and available on your device, you can use the extra storage capacity to hold the storage-demanding Docker directory.

+
    +
  1. +

    Stop the Docker service.

    +
    sudo systemctl stop docker
    +
    +
  2. +
  3. +

    Move the existing Docker folder

    +
    sudo du -csh /var/lib/docker/ && \
    +    sudo mkdir /ssd/docker && \
    +    sudo rsync -axPS /var/lib/docker/ /ssd/docker/ && \
    +    sudo du -csh  /ssd/docker/ 
    +
    +
  4. +
  5. +

    Edit /etc/docker/daemon.json

    +
    sudo vi /etc/docker/daemon.json
    +
    +

    Insert "data-root" line like the following.

    +
    {
    +    "runtimes": {
    +        "nvidia": {
    +            "path": "nvidia-container-runtime",
    +            "runtimeArgs": []
    +        }
    +    },
    +    "default-runtime": "nvidia",
    +    "data-root": "/ssd/docker"
    +}
    +
    +
  6. +
  7. +

    Rename the old Docker data directory

    +
    sudo mv /var/lib/docker /var/lib/docker.old
    +
    +
  8. +
  9. +

    Restart the docker daemon

    +
    sudo systemctl daemon-reload && \
    +    sudo systemctl restart docker && \
    +    sudo journalctl -u docker
    +
    +
  10. +
+
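Once you have verified that Docker is running correctly from the SSD (see the test below), you can reclaim the space used by the old directory; only do this after verifying:

sudo rm -rf /var/lib/docker.old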

Test Docker on SSD

+
    +
  1. +

    [Terminal 1] First, open a terminal to monitor the disk usage while pulling a Docker image.

    +
    watch -n1 df 
    +
    +
  2. +
  3. +

    [Terminal 2] Next, open a new terminal and start Docker pull.

    +
    docker pull nvcr.io/nvidia/l4t-base:r35.2.1
    +
    +
  4. +
  5. +

    [Terminal 1] Observe that the disk usage on /ssd goes up as the container image is downloaded and extracted.

    +
    ~$ docker image ls
    +REPOSITORY                  TAG       IMAGE ID       CREATED        SIZE
    +nvcr.io/nvidia/l4t-base     r35.2.1   dc07eb476a1d   7 months ago   713MB
    +
    +
  6. +
+

Final Verification

+

Reboot your Jetson, and verify that you observe the following:

+
~$ sudo blkid | grep nvme
+/dev/nvme0n1: UUID="9fc06de1-7cf3-43e2-928a-53a9c03fc5d8" TYPE="ext4"
+
+~$ df -h
+Filesystem      Size  Used Avail Use% Mounted on
+/dev/mmcblk1p1  116G   18G   94G  16% /
+none            3.5G     0  3.5G   0% /dev
+tmpfs           3.6G  108K  3.6G   1% /dev/shm
+tmpfs           734M   35M  699M   5% /run
+tmpfs           5.0M  4.0K  5.0M   1% /run/lock
+tmpfs           3.6G     0  3.6G   0% /sys/fs/cgroup
+tmpfs           734M   88K  734M   1% /run/user/1000
+/dev/nvme0n1    458G  824M  434G   1% /ssd
+
+~$ docker info | grep Root
+ Docker Root Dir: /ssd/docker
+
+~$ sudo ls -l /ssd/docker/
+total 44
+drwx--x--x  4 root root 4096 Mar 22 11:44 buildkit
+drwx--x---  2 root root 4096 Mar 22 11:44 containers
+drwx------  3 root root 4096 Mar 22 11:44 image
+drwxr-x---  3 root root 4096 Mar 22 11:44 network
+drwx--x--- 13 root root 4096 Mar 22 16:20 overlay2
+drwx------  4 root root 4096 Mar 22 11:44 plugins
+drwx------  2 root root 4096 Mar 22 16:19 runtimes
+drwx------  2 root root 4096 Mar 22 11:44 swarm
+drwx------  2 root root 4096 Mar 22 16:20 tmp
+drwx------  2 root root 4096 Mar 22 11:44 trust
+drwx-----x  2 root root 4096 Mar 22 16:19 volumes
+
+~$ sudo du -chs /ssd/docker/
+752M    /ssd/docker/
+752M    total
+
+~$ docker info | grep -e "Runtime" -e "Root"
+ Runtimes: io.containerd.runtime.v1.linux nvidia runc io.containerd.runc.v2
+ Default Runtime: nvidia
+ Docker Root Dir: /ssd/docker
+
+

Your Jetson is now set up with the SSD!

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/try.html b/try.html new file mode 100644 index 00000000..91a078e0 --- /dev/null +++ b/try.html @@ -0,0 +1,1646 @@ + + + + + + + + + + + + + + + + + + + + + + Try - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial-intro.html b/tutorial-intro.html new file mode 100644 index 00000000..2e0dec76 --- /dev/null +++ b/tutorial-intro.html @@ -0,0 +1,2085 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Introduction - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + +

Tutorial - Introduction

+

Overview

+

Our tutorials are divided into categories roughly based on model modality, the type of data to be processed or generated.

+

Text (LLM)

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
text-generation-webuiInteract with a local AI assistant by running an LLM with oobabooga's text-generation-webui
OllamaGet started effortlessly deploying GGUF models for chat and web UI
llamaspeakTalk live with Llama using Riva ASR/TTS, and chat about images with Llava!
NanoLLMOptimized inferencing library for LLMs, multimodal agents, and speech.
Small LLM (SLM)Deploy Small Language Models (SLM) with reduced memory usage and higher throughput.
API ExamplesLearn how to write Python code for doing LLM inference using popular APIs.
+

Text + Vision (VLM)

+

Give your locally running LLM access to vision!

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Mini-GPT4Mini-GPT4, an open-source model that demonstrates vision-language capabilities.
LLaVALarge Language and Vision Assistant, multimodal model that combines a vision encoder and LLM for visual and language understanding.
Live LLaVARun multimodal models interactively on live video streams over a repeating set of prompts.
NanoVLMUse mini vision/language models and the optimized multimodal pipeline for live streaming.
+

Image Generation

+ + + + + + + + + + + + + + + + + +
Stable DiffusionRun AUTOMATIC1111's stable-diffusion-webui to generate images from prompts
Stable Diffusion XLA newer ensemble pipeline consisting of a base model and refiner that results in significantly enhanced and detailed image generation capabilities.
+

Vision Transformers (ViT)

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
EfficientVITMIT Han Lab's EfficientViT, Multi-Scale Linear Attention for High-Resolution Dense Prediction
NanoOWLOWL-ViT optimized to run real-time on Jetson with NVIDIA TensorRT
NanoSAMNanoSAM, SAM model variant capable of running in real-time on Jetson
SAMMeta's SAM, Segment Anything model
TAMTAM, Track-Anything model, is an interactive tool for video object tracking and segmentation
+

Vector Database

+ + + + + + + + + + + + + +
NanoDBInteractive demo to witness the impact of a vector database that handles multimodal data
+

Audio

+ + + + + + + + + + + + + + + + + + + + + +
WhisperOpenAI's Whisper, pre-trained model for automatic speech recognition (ASR)
AudioCraftMeta's AudioCraft, to produce high-quality audio and music
VoicecraftVoicecraft, speech editing and zero-shot TTS
+

Metropolis Microservices

+ + + + + + + + + + + + + +
First StepsGet Metropolis Microservices up & running on Jetson with NVStreamer and AI NVR capabilities.
+

About NVIDIA Jetson

+
+

Note

+

We are mainly targeting Jetson Orin generation devices for deploying the latest LLMs and generative AI models.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Jetson AGX Orin 64GB Developer KitJetson AGX Orin Developer KitJetson Orin Nano Developer Kit


GPU2048-core NVIDIA Ampere architecture GPU with 64 Tensor Cores1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores
RAM
(CPU+GPU)
64GB32GB8GB
Storage64GB eMMC (+ NVMe SSD)microSD card (+ NVMe SSD)
+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_api-examples.html b/tutorial_api-examples.html new file mode 100644 index 00000000..c263a59a --- /dev/null +++ b/tutorial_api-examples.html @@ -0,0 +1,1890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + API Examples - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - API Examples

+

It's good to know the code behind generating text with LLM inference, along with ancillary things like tokenization, chat templates, and prompting. On this page we give Python examples of running various LLM APIs, and their benchmarks.

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35) +JetPack 6 (L4T r36)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 22GB for l4t-text-generation container image
    • +
    • Space for models (>10GB)
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

Transformers

+

The HuggingFace Transformers API is the de-facto API that models are released for, often serving as the reference implementation. It's not terribly fast, but it does have broad model support, and it also supports quantization (AutoGPTQ, AWQ). This example uses streaming:

+
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from threading import Thread
+
+model_name='meta-llama/Llama-2-7b-chat-hf'
+model = AutoModelForCausalLM.from_pretrained(model_name, device_map='cuda')
+
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+streamer = TextIteratorStreamer(tokenizer)
+
+prompt = [{'role': 'user', 'content': 'Can I get a recipe for French Onion soup?'}]
+inputs = tokenizer.apply_chat_template(
+    prompt,
+    add_generation_prompt=True,
+    return_tensors='pt'
+).to(model.device)
+
+Thread(target=lambda: model.generate(inputs, max_new_tokens=256, streamer=streamer)).start()
+
+for text in streamer:
+    print(text, end='', flush=True)
+
+

To run this (it can be found here), you can mount a directory containing the script or your jetson-containers directory:

+
jetson-containers run --volume $PWD/packages/llm:/mount --workdir /mount \
+  $(autotag l4t-text-generation) \
+    python3 transformers/test.py
+
+

We use the l4t-text-generation container because it includes the quantization libraries in addition to Transformers, for running the quantized versions of models like TheBloke/Llama-2-7B-Chat-GPTQ

+
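If you don't need token streaming, a simpler blocking variant looks like this (a sketch reusing the model and tokenizer objects from the example above):

# generate the full reply at once, then decode it
outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))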

Benchmarks

+

The huggingface-benchmark.py script will benchmark the models:

+
./run.sh --volume $PWD/packages/llm/transformers:/mount --workdir /mount \
+  $(./autotag l4t-text-generation) \
+    python3 huggingface-benchmark.py --model meta-llama/Llama-2-7b-chat-hf
+
+
* meta-llama/Llama-2-7b-chat-hf  AVG = 20.7077 seconds,  6.2 tokens/sec  memory=10173.45 MB
+* TheBloke/Llama-2-7B-Chat-GPTQ  AVG = 12.3922 seconds, 10.3 tokens/sec  memory=7023.36 MB
+* TheBloke/Llama-2-7B-Chat-AWQ   AVG = 11.4667 seconds, 11.2 tokens/sec  memory=4662.34 MB
+
+

NanoLLM

+

The NanoLLM library uses the optimized MLC/TVM library for inference, like on the Benchmarks page:

+

+
> NanoLLM Reference Documentation
from nano_llm import NanoLLM, ChatHistory, ChatTemplates
+
+# load model
+model = NanoLLM.from_pretrained(
+    model='meta-llama/Meta-Llama-3-8B-Instruct', 
+    quantization='q4f16_ft', 
+    api='mlc'
+)
+
+# create the chat history
+chat_history = ChatHistory(model, system_prompt="You are a helpful and friendly AI assistant.")
+
+while True:
+    # enter the user query from terminal
+    print('>> ', end='', flush=True)
+    prompt = input().strip()
+
+    # add user prompt and generate chat tokens/embeddings
+    chat_history.append(role='user', msg=prompt)
+    embedding, position = chat_history.embed_chat()
+
+    # generate bot reply
+    reply = model.generate(
+        embedding, 
+        streaming=True, 
+        kv_cache=chat_history.kv_cache,
+        stop_tokens=chat_history.template.stop,
+        max_new_tokens=256,
+    )
+
+    # append the output stream to the chat history
+    bot_reply = chat_history.append(role='bot', text='')
+
+    for token in reply:
+        bot_reply.text += token
+        print(token, end='', flush=True)
+
+    print('\n')
+
+    # save the inter-request KV cache 
+    chat_history.kv_cache = reply.kv_cache
+
+

This example keeps an interactive chat running with text being entered from the terminal. You can start it like this:

+
jetson-containers run \
+  --env HUGGINGFACE_TOKEN=hf_abc123def \
+  $(autotag nano_llm) \
+    python3 -m nano_llm.chat.example
+
+

Or for easy editing from the host device, copy the source into your own script and mount it into the container with the --volume flag. And for authenticated models, request access through HuggingFace (like with Llama) and substitute your account's API token above.

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_audiocraft.html b/tutorial_audiocraft.html new file mode 100644 index 00000000..66ce3846 --- /dev/null +++ b/tutorial_audiocraft.html @@ -0,0 +1,1872 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + AudioCraft - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - AudioCraft

+

Let's run Meta's AudioCraft, to produce high-quality audio and music on Jetson!

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 10.7 GB for audiocraft container image
    • +
    • Space for checkpoints
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Use the jetson-containers run and autotag scripts to automatically pull or build a compatible container image.

+
jetson-containers run $(autotag audiocraft)
+
+

The container has a default run command (CMD) that will automatically start the Jupyter Lab server.

+

Open your browser and access http://<IP_ADDRESS>:8888.

+
+

The default password for Jupyter Lab is nvidia.

+
+

Run Jupyter notebooks

+

AudioCraft repo comes with demo Jupyter notebooks.

+

In the Jupyter Lab navigation pane on the left, double-click the demos folder.

+

+

AudioGen demo

+ + +

Run cells with Shift + Enter; the first one will download models, which can take some time.

+
+

Info

+

You may encounter an error message like the following when executing the first cell, but you can keep going. +

A matching Triton is not available, some optimizations will not be enabled.
+Error caught was: No module named 'triton'
+

+
+ + +

In the Audio Continuation cells, you can generate a continuation of an audio prompt (optionally guided by text descriptions), while in Text-conditional Generation you can generate audio from text descriptions alone.

+

You can also use your own audio as a prompt, and use text descriptions to generate a continuation: +

prompt_waveform, prompt_sr = torchaudio.load("../assets/sirens_and_a_humming_engine_approach_and_pass.mp3") # you can upload your own audio
+prompt_duration = 2
+prompt_waveform = prompt_waveform[..., :int(prompt_duration * prompt_sr)]
+output = model.generate_continuation(prompt_waveform.expand(3, -1, -1), prompt_sample_rate=prompt_sr,descriptions=[
+        'Subway train blowing its horn',   # text descriptions for continuation
+        'Horse neighing furiously',
+        'Cat hissing'
+], progress=True)
+display_audio(output, sample_rate=16000)
+

+
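If you want to keep a generated clip outside the notebook, you can write it to disk (a sketch assuming output is a [batch, channels, samples] tensor at 16kHz, as returned above):

import torchaudio
torchaudio.save('continuation.wav', output[0].cpu(), 16000)  # save the first clip in the batch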

MusicGen and MAGNeT demos

+

The two other Jupyter notebooks are similar to AudioGen, letting you generate continuations or new audio while using models trained to generate music.

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_distillation.html b/tutorial_distillation.html new file mode 100644 index 00000000..02f54268 --- /dev/null +++ b/tutorial_distillation.html @@ -0,0 +1,1666 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + 📑 Knowledge Distillation - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_efficientvit.html b/tutorial_efficientvit.html new file mode 100644 index 00000000..66f5fb7e --- /dev/null +++ b/tutorial_efficientvit.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_live-llava.html b/tutorial_live-llava.html new file mode 100644 index 00000000..3b1206b7 --- /dev/null +++ b/tutorial_live-llava.html @@ -0,0 +1,1908 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Live LLaVA - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - Live LLaVA

+
+

Recommended

+

Follow the chat-based LLaVA and NanoVLM tutorials to familiarize yourself with vision/language models and test the models first.

+
+

This multimodal agent runs a vision-language model on a live camera feed or video stream, repeatedly applying the same prompts to it:

+

+

It uses models like LLaVA or VILA that have been quantized with 4-bit precision. This runs an optimized multimodal pipeline from the NanoLLM library, including running the CLIP/SigLIP vision encoder in TensorRT, event filters and alerts, and multimodal RAG (see the NanoVLM page for benchmarks).

+

+

Running the Live Llava Demo

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 22GB for nano_llm container image
    • +
    • Space for models (>10GB)
    • +
    +
  6. +
  7. +

    Follow the chat-based LLaVA and NanoVLM tutorials first.

    +
  8. +
  9. +

    Supported vision/language models:

    + +
  10. +
+
+

The VideoQuery agent applies prompts to the incoming video feed with the VLM. Navigate your browser to https://<IP_ADDRESS>:8050 after launching it with your camera (Chrome is recommended with chrome://flags#enable-webrtc-hide-local-ips-with-mdns disabled)

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.agents.video_query --api=mlc \
+    --model Efficient-Large-Model/VILA1.5-3b \
+    --max-context-len 256 \
+    --max-new-tokens 32 \
+    --video-input /dev/video0 \
+    --video-output webrtc://@:8554/output
+
+

+

This uses jetson_utils for video I/O, and for options related to protocols and file formats, see Camera Streaming and Multimedia. In the example above, it captures a V4L2 USB webcam connected to the Jetson (under the device /dev/video0) and outputs a WebRTC stream.

+

Processing a Video File or Stream

+

The example above was running on a live camera, but you can also read and write a video file or network stream by substituting the path or URL to the --video-input and --video-output command-line arguments like this:

+
jetson-containers run \
+  -v /path/to/your/videos:/mount \
+  $(autotag nano_llm) \
+    python3 -m nano_llm.agents.video_query --api=mlc \
+      --model Efficient-Large-Model/VILA1.5-3b \
+      --max-context-len 256 \
+      --max-new-tokens 32 \
+      --video-input /mount/my_video.mp4 \
+      --video-output /mount/output.mp4 \
+      --prompt "What does the weather look like?"
+
+

This example processes a pre-recorded video (in MP4, MKV, AVI, or FLV format with H.264/H.265 encoding), but it can also input/output live network streams like RTP, RTSP, and WebRTC using Jetson's hardware-accelerated video codecs.

+

NanoDB Integration

+

If you launch the VideoQuery agent with the --nanodb flag along with a path to your NanoDB database, it will perform reverse-image search on the incoming feed against the database by re-using the CLIP embeddings generated by the VLM.

+

To enable this mode, first follow the NanoDB tutorial to download, index, and test the database. Then launch VideoQuery like this:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.agents.video_query --api=mlc \
+    --model Efficient-Large-Model/VILA1.5-3b \
+    --max-context-len 256 \
+    --max-new-tokens 32 \
+    --video-input /dev/video0 \
+    --video-output webrtc://@:8554/output \
+    --nanodb /data/nanodb/coco/2017
+
+

You can also tag incoming images and add them to the database using the web UI, for one-shot recognition tasks:

+
+ +
+ +

Video VILA

+

The VILA-1.5 family of models can understand multiple images per query, enabling video search/summarization, action & behavior analysis, change detection, and other temporal-based vision functions. The vision/video.py example keeps a rolling history of frames:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.vision.video \
+    --model Efficient-Large-Model/VILA1.5-3b \
+    --max-images 8 \
+    --max-new-tokens 48 \
+    --video-input /data/my_video.mp4 \
+    --video-output /data/my_output.mp4 \
+    --prompt 'What changes occurred in the video?'
+
+

+

Note: support will be added to the web UI for continuous multi-image queries on video sequences.

+

Python Code

+

For a simplified code example of doing live VLM streaming from Python, see here in the NanoLLM docs.

+ + +

You can use this to implement customized prompting techniques and integrate with other vision pipelines. This code applies the same set of prompts to the latest image from the video feed. See here for the version that does multi-image queries on video sequences.

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_llamaspeak.html b/tutorial_llamaspeak.html new file mode 100644 index 00000000..25cb1a11 --- /dev/null +++ b/tutorial_llamaspeak.html @@ -0,0 +1,1839 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + llamaspeak - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - llamaspeak

+

Talk live with Llama using streaming ASR/TTS, and chat about images with Llava!

+

+
    +
  • The NanoLLM library provides optimized inference for LLM and speech models.
  • +
  • It's recommended to run JetPack 6.0 to be able to run the latest containers.
  • +
+

The WebChat agent has responsive conversational abilities and multimodal support for chatting about images with vision/language models, including overlapping ASR/LLM/TTS generation and verbal interruptability.

+

Running llamaspeak

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 6 (L4T r36)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 22GB for nano_llm container image
    • +
    • Space for models (>10GB)
    • +
    +
  6. +
  7. +

    Start the Riva server first and test the ASR examples.

    +
  8. +
+
+
jetson-containers run --env HUGGINGFACE_TOKEN=hf_xyz123abc456 \
+  $(autotag nano_llm) \
+  python3 -m nano_llm.agents.web_chat --api=mlc \
+    --model meta-llama/Meta-Llama-3-8B-Instruct \
+    --asr=riva --tts=piper
+
+

This will start llamaspeak with text LLM and ASR/TTS enabled. You can then navigate your browser to https://IP_ADDRESS:8050 + +

    +
  • The default port is 8050, but can be changed with --web-port (and --ws-port for the websocket port)
  • +
  • During bot replies, the TTS model will pause output if you speak a few words in the mic to interrupt it.
  • +
  • Request access to the Llama models on HuggingFace and substitute your account's API token above.
  • +
+

+ + +

The code and docs for the WebAgent that runs llamaspeak can be found in the NanoLLM library. This block diagram shows the speech pipeline with interleaved model generation, user interruption, and streaming I/O:

+

+

Multimodality

+

If you load a multimodal vision/language model instead, you can drag images into the chat and ask questions about them:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.agents.web_chat --api=mlc \
+    --model Efficient-Large-Model/VILA-7b \
+    --asr=riva --tts=piper
+
+ + +

For more info about the supported vision/language models, see the NanoVLM page.

+

Function Calling

+

There's the ability to define functions from Python that the bot has access to and can invoke based on the chat flow:

+ + +

This works by using the bot_function() decorator and adding the API descriptions to the system prompt:

+
from nano_llm import NanoLLM, ChatHistory, BotFunctions, bot_function
+from datetime import datetime
+
+@bot_function
+def DATE():
+    """ Returns the current date. """
+    return datetime.now().strftime("%A, %B %-d %Y")  # %-d prints the day of the month
+
+@bot_function
+def TIME():
+    """ Returns the current time. """
+    return datetime.now().strftime("%-I:%M %p")
+
+system_prompt = "You are a helpful and friendly AI assistant." + BotFunctions.generate_docs()
+
+

The system prompt can be autogenerated from the Python docstrings embedded in the functions themselves, and can include parameters that the bot can supply (for example, selectively saving relevant user info to a vector database for RAG like is shown in the video).

+
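As a minimal sketch of wiring this into a chat session (reusing the NanoLLM APIs shown elsewhere on this site; the function-invocation plumbing itself is handled inside NanoLLM):

model = NanoLLM.from_pretrained('meta-llama/Meta-Llama-3-8B-Instruct', api='mlc')
chat_history = ChatHistory(model, system_prompt=system_prompt)  # system_prompt defined above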

For more information about this topic, see the Function Calling section of the NanoLLM documentation.

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_llava.html b/tutorial_llava.html new file mode 100644 index 00000000..2aed0821 --- /dev/null +++ b/tutorial_llava.html @@ -0,0 +1,2143 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + LLaVA - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Tutorial - LLaVA

+

LLaVA is a popular multimodal vision/language model that you can run locally on Jetson to answer questions about image prompts and queries. Llava uses the CLIP vision encoder to transform images into the same embedding space as its LLM (which shares the Llama architecture). Below we cover different methods to run Llava on Jetson, with increasingly optimized performance:

+
    +
  1. Chat with Llava using text-generation-webui
  2. +
  3. Run from the terminal with llava.serve.cli
  4. +
  5. Quantized GGUF models with llama.cpp
  6. +
  7. Optimized Multimodal Pipeline with NanoVLM
  8. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Llava-13B (Jetson AGX Orin)QuantizationTokens/secMemory
text-generation-webui4-bit (GPTQ)2.39.7 GB
llava.serve.cliFP16 (None)4.227.7 GB
llama.cpp4-bit (Q4_K)10.19.2 GB
NanoVLM4-bit (MLC)21.18.7 GB
+

In addition to Llava, the NanoVLM pipeline supports VILA and mini vision models that run on Orin Nano as well.

+

+

1. Chat with Llava using text-generation-webui

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.2GB for text-generation-webui container image
    • +
    • Space for models
        +
      • CLIP model : 1.7GB
      • +
      • Llava-v1.5-13B-GPTQ model : 7.25GB
      • +
      +
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

Download Model

+
jetson-containers run --workdir=/opt/text-generation-webui $(autotag text-generation-webui) \
+  python3 download-model.py --output=/data/models/text-generation-webui \
+    TheBloke/llava-v1.5-13B-GPTQ
+
+

Start Web UI with Multimodal Extension

+
jetson-containers run --workdir=/opt/text-generation-webui $(autotag text-generation-webui) \
+  python3 server.py --listen \
+    --model-dir /data/models/text-generation-webui \
+    --model TheBloke_llava-v1.5-13B-GPTQ \
+    --multimodal-pipeline llava-v1.5-13b \
+    --loader autogptq \
+    --disable_exllama \
+    --verbose
+
+

Go to the Chat tab, drag and drop an image into the Drop Image Here area, type your question in the text area, and hit Generate:

+

+

Result

+

+

2. Run from the terminal with llava.serve.cli

+
+

What you need

+
    +
  1. +

One of the following Jetson devices:

    +

    Jetson AGX Orin 64GB +Jetson AGX Orin (32GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.1GB for llava container
    • +
    • 14GB for Llava-7B (or 26GB for Llava-13B)
    • +
    +
  6. +
+
+

This example uses the upstream Llava repo to run the original, unquantized Llava models from the command-line. It uses more memory due to using FP16 precision, and is provided mostly as a reference for debugging. See the Llava container readme for more info.

+

llava-v1.5-7b

+
jetson-containers run $(autotag llava) \
+  python3 -m llava.serve.cli \
+    --model-path liuhaotian/llava-v1.5-7b \
+    --image-file /data/images/hoover.jpg
+
+

llava-v1.5-13b

+
jetson-containers run $(autotag llava) \
+  python3 -m llava.serve.cli \
+    --model-path liuhaotian/llava-v1.5-13b \
+    --image-file /data/images/hoover.jpg
+
+
+

Unquantized 13B may run only on Jetson AGX Orin 64GB due to memory requirements.

+
+ + +

3. Quantized GGUF models with llama.cpp

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
+
+

llama.cpp is one of the faster LLM APIs, and it can apply a variety of quantization methods to Llava to reduce its memory usage and runtime. Despite its name, it uses CUDA. There are pre-quantized versions of Llava-1.5 available in GGUF format for 4-bit and 5-bit:

+ +
jetson-containers run --workdir=/opt/llama.cpp/bin $(autotag llama_cpp:gguf) \
+  /bin/bash -c './llava-cli \
+    --model $(huggingface-downloader mys/ggml_llava-v1.5-13b/ggml-model-q4_k.gguf) \
+    --mmproj $(huggingface-downloader mys/ggml_llava-v1.5-13b/mmproj-model-f16.gguf) \
+    --n-gpu-layers 999 \
+    --image /data/images/hoover.jpg \
+    --prompt "What does the sign say"'
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
QuantizationBitsResponseTokens/secMemory
Q4_K4The sign says "Hoover Dam, Exit 9."10.179.2 GB
Q5_K5The sign says "Hoover Dam exit 9."9.7310.4 GB
+

A lower temperature like 0.1 is recommended for better quality (--temp 0.1), and if you omit --prompt it will describe the image:

+
jetson-containers run --workdir=/opt/llama.cpp/bin $(autotag llama_cpp:gguf) \
+  /bin/bash -c './llava-cli \
+    --model $(huggingface-downloader mys/ggml_llava-v1.5-13b/ggml-model-q4_k.gguf) \
+    --mmproj $(huggingface-downloader mys/ggml_llava-v1.5-13b/mmproj-model-f16.gguf) \
+    --n-gpu-layers 999 \
+    --image /data/images/lake.jpg'
+
+In this image, a small wooden pier extends out into a calm lake, surrounded by tall trees and mountains. The pier seems to be the only access point to the lake. The serene scene includes a few boats scattered across the water, with one near the pier and the others further away. The overall atmosphere suggests a peaceful and tranquil setting, perfect for relaxation and enjoying nature.
+
+

You can put your own images in the mounted jetson-containers/data directory. The C++ code for llava-cli can be found here. The llama-cpp-python bindings also support Llava; however, they are significantly slower from Python for some reason (potentially pre-processing).

+

4. Optimized Multimodal Pipeline with NanoVLM

+
+

What's Next

+

This section got too long and was moved to the NanoVLM page - check it out there for performance optimizations, mini VLMs, and live streaming!

+
+

+

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_minigpt4.html b/tutorial_minigpt4.html new file mode 100644 index 00000000..742a2b7d --- /dev/null +++ b/tutorial_minigpt4.html @@ -0,0 +1,1778 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Mini-GPT4 - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - MiniGPT-4

+

Give your locally running LLM access to vision by running MiniGPT-4 on Jetson!

+

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    + +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

Start minigpt4 container with models

+

To start the MiniGPT4 container and webserver with the recommended models, run this command:

+
jetson-containers run $(autotag minigpt4) /bin/bash -c 'cd /opt/minigpt4.cpp/minigpt4 && python3 webui.py \
+  $(huggingface-downloader --type=dataset maknee/minigpt4-13b-ggml/minigpt4-13B-f16.bin) \
+  $(huggingface-downloader --type=dataset maknee/ggml-vicuna-v0-quantized/ggml-vicuna-13B-v0-q5_k.bin)'
+
+

Then, open your web browser and access http://<IP_ADDRESS>:7860.

+

Results

+

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_mmj.html b/tutorial_mmj.html new file mode 100644 index 00000000..72960a3d --- /dev/null +++ b/tutorial_mmj.html @@ -0,0 +1,1973 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + First Steps - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + +

First Steps

+ +

First steps with Metropolis Microservices for Jetson

+

NVIDIA Metropolis Microservices for Jetson simplifies the development of vision AI applications, offering a suite of customizable, cloud-native tools. Before diving into this tutorial, ensure you've filled out the Metropolis Microservices for Jetson Early Access form to gain the necessary access to launch the services. This step is crucial as it enables you to utilize all the features and capabilities discussed in this guide.

+

Perfect for both newcomers and experts, this tutorial provides straightforward steps to kick-start your edge AI projects, whether you're a student or an ecosystem partner working on a use case.

+

android2

+

0. Install NVIDIA Jetson Services:

+

Ok, let's start by installing NVIDIA Jetson Services: +

sudo apt install nvidia-jetson-services
+

+

Let's add some performance hacks that will be needed to run the demo faster and without streaming artifacts:

+
    +
  • +

If you don't have the Orin at max performance, you can use these two commands; a reboot is needed afterward: +

    sudo nvpmodel -m 0 
    +sudo jetson_clocks
    +

    +
  • +
  • +

    These sysctl settings are optional, but they fine-tune your network buffers to ensure smoother streaming by optimizing how much data can be sent and received (to make them persistent across reboots, see the sketch after this list): +

    sudo sysctl -w net.core.rmem_default=2129920
    +sudo sysctl -w net.core.rmem_max=10000000
    +sudo sysctl -w net.core.wmem_max=2000000
    +

    +
  • +
+
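To make these buffer sizes persist across reboots (a sketch using the standard sysctl configuration mechanism, with the same values as above):

echo "net.core.rmem_default=2129920" | sudo tee -a /etc/sysctl.conf
echo "net.core.rmem_max=10000000" | sudo tee -a /etc/sysctl.conf
echo "net.core.wmem_max=2000000" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p   # apply without rebooting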
+

1. Download NVIDIA CLI for Jetson

+

Download NGC for ARM64 from the NGC for CLI site: +

unzip ngccli_arm64.zip
+chmod u+x ngc-cli/ngc
+echo "export PATH=\"\$PATH:$(pwd)/ngc-cli\"" >> ~/.bash_profile && source ~/.bash_profile
+ngc config set
+
+Here it will ask for your API key and the organization name; to get those, you need to log into NGC and generate an API key here.

+

You should then paste the API key and enter the organization name you are using. You can also press [Enter] to select the default values for the remaining options. After this, you should get the message:

+
Successfully saved NGC configuration to /home/jetson/.ngc/config
+
+

Then, log in with the same API key: +

sudo docker login nvcr.io -u "\$oauthtoken" -p <NGC-API-KEY>
+
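To avoid leaving the key in your shell history, you can pipe it in instead (a standard docker login pattern):

echo "<NGC-API-KEY>" | sudo docker login nvcr.io -u '$oauthtoken' --password-stdin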

+

Now launch the Redis and Ingress services, as we need them for this tutorial.

+
sudo systemctl start jetson-redis
+sudo systemctl start jetson-ingress
+
+
+

2. Download and launch NVStreamer

+

+

First, we need to install NVStreamer, an app that serves the videos that Metropolis Microservices (MMJ) will run AI on. Follow this NVStreamer Link (in the top-left, click Download files.zip)

+

unzip files.zip
+rm files.zip
+tar -xvf nvstreamer.tar.gz
+cd nvstreamer
+
+Launch it: +
sudo docker compose -f compose_nvstreamer.yaml up -d  --force-recreate
+

+
+

3. Download AI_NVR and launch:

+

+

AI NVR (NGC) Link (Top-left -> Download files.zip)

+
unzip files.zip
+rm files.zip
+tar -xvf ai_nvr.tar.gz
+sudo cp ai_nvr/config/ai-nvr-nginx.conf /opt/nvidia/jetson/services/ingress/config/
+cd ai_nvr
+sudo docker compose -f compose_agx.yaml up -d --force-recreate
+
+

4. Download some sample videos and upload them to NVStreamer

+

Download them from here.

+

unzip files.zip
+
+Ok, now, this is important to understand: there are 2 web interfaces:

+
    +
  1. The NVStreamer Dashboard, running at: http://localhost:31000
  2. +
  3. The Camera Management Dashboard (VST), running at: http://localhost:30080/vst
  4. +
+

So, first we need to upload the file in the NVStreamer interface, which looks like this:

+
+

image1

+
+

There, go to File Upload, and drag and drop the file into the upload area.

+

After uploading it, go to the Dashboard option of the left menu and copy the RTSP URL of the video you just uploaded; you will need it for the Camera Management Dashboard.

+

Now jump to the Camera Management Dashboard (http://localhost:30080/vst), it looks like this:

+
+

image2

+
+

Go to the Camera Management option of the menu, then use the Add device manually option and paste the RTSP URL. Add the name of your video to the Name and Location text boxes so it will be displayed on top of the stream.

+

Finally, click on the Live Streams option of the left menu, and you should be able to watch your video stream.

+
+

video1

+
+

5. Watch RTSP AI processed streaming from VLC

+

Open VLC on another computer (localhost doesn't work here) and point it to your Jetson Orin's IP address (you should be on the same network, with no firewall blocking access).

+

The easiest way to get the Jetson's IP address is to run: +

ifconfig
+
+Then check the inet IP of the active interface (usually wlan0).

+

Then go to rtsp://[JETSON_IP]:8555/ds-test using VLC like this:

+
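If you'd rather verify the stream programmatically than in VLC, a few lines of Python with OpenCV can confirm that frames are arriving. This is just a sketch - it assumes opencv-python with FFMPEG support is installed on the client machine, and JETSON_IP is a placeholder for the address you found above:

import cv2

# JETSON_IP is a placeholder - substitute the address you found with ifconfig
cap = cv2.VideoCapture("rtsp://JETSON_IP:8555/ds-test")

ok, frame = cap.read()
if ok:
    print(f"Receiving frames: {frame.shape[1]}x{frame.shape[0]}")
else:
    print("No frames received - check the RTSP URL, network, and firewall")
cap.release()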
+

video2

+
+

6. Android app

+

There is an Android app that allows you to track events and create areas of interest to monitor; you can find it on Google Play as AI NVR.

+

+
+

Here is a quick walkthrough where you can see how to:

+
    +
  • Add the IP address of the Jetson
  • +
  • Track current events
  • +
  • Add new areas of interest
  • +
  • Add tripwire to track the flux and direction of events
  • +
+

android2

+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_nano-llm.html b/tutorial_nano-llm.html new file mode 100644 index 00000000..e6a28803 --- /dev/null +++ b/tutorial_nano-llm.html @@ -0,0 +1,1814 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + NanoLLM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

NanoLLM - Optimized LLM Inference

+

NanoLLM is a lightweight, high-performance library using optimized inference APIs for quantized LLMs, multimodality, speech services, vector databases with RAG, and web frontends. It's used to build many of the responsive, low-latency agents featured on this site.

+

+

It provides similar APIs to HuggingFace, backed by highly-optimized inference libraries and quantization tools:

+
NanoLLM Reference Documentation
from nano_llm import NanoLLM
+
+model = NanoLLM.from_pretrained(
+   "meta-llama/Meta-Llama-3-8B-Instruct",  # HuggingFace repo/model name, or path to HF model checkpoint
+   api='mlc',                              # supported APIs are: mlc, awq, hf
+   api_token='hf_abc123def',               # HuggingFace API key for authenticated models ($HUGGINGFACE_TOKEN)
+   quantization='q4f16_ft'                 # q4f16_ft, q4f16_1, q8f16_0 for MLC, or path to AWQ weights
+)
+
+response = model.generate("Once upon a time,", max_new_tokens=128)
+
+for token in response:
+   print(token, end='', flush=True)
+
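Since generate() streams the response token by token, you can also time the loop to get a rough tokens-per-second figure. A minimal sketch using only the API calls shown above (the timing logic is our own addition, not part of NanoLLM):

import time
from nano_llm import NanoLLM

model = NanoLLM.from_pretrained(
   "meta-llama/Meta-Llama-3-8B-Instruct",
   api='mlc',
   quantization='q4f16_ft'
)

start = time.perf_counter()
num_tokens = 0

# count the tokens as they stream out of the model
for token in model.generate("Once upon a time,", max_new_tokens=128):
    print(token, end='', flush=True)
    num_tokens += 1

elapsed = time.perf_counter() - start
print(f"\n{num_tokens} tokens in {elapsed:.2f}s ({num_tokens / elapsed:.1f} tokens/sec)")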
+

Containers

+

To test a chat session with Llama from the command-line, install jetson-containers and run NanoLLM like this:

+

git clone https://github.com/dusty-nv/jetson-containers
+bash jetson-containers/install.sh
+
+
jetson-containers run \
+  --env HUGGINGFACE_TOKEN=hf_abc123def \
+  $(autotag nano_llm) \
+  python3 -m nano_llm.chat --api mlc \
+    --model meta-llama/Meta-Llama-3-8B-Instruct \
+    --prompt "Can you tell me a joke about llamas?"
+

+

If you haven't already, request access to the Llama models on HuggingFace and substitute your account's API token above.

+

Resources

+

Here's an index of the various tutorials & examples using NanoLLM on Jetson AI Lab:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
BenchmarksBenchmarking results for LLM, SLM, VLM using MLC/TVM backend.
API ExamplesPython code examples for chat, completion, and multimodal.
DocumentationReference documentation for the NanoLLM model and agent APIs.
LlamaspeakTalk verbally with LLMs using low-latency ASR/TTS speech models.
Small LLM (SLM)Focus on language models with reduced footprint (7B params and below)
Live LLaVARealtime live-streaming vision/language models on recurring prompts.
Nano VLMEfficient multimodal pipeline with one-shot image tagging and RAG support.
+
+ +
+
+ +
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_nano-vlm.html b/tutorial_nano-vlm.html new file mode 100644 index 00000000..ab0a4bfe --- /dev/null +++ b/tutorial_nano-vlm.html @@ -0,0 +1,2010 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + NanoVLM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

NanoVLM - Efficient Multimodal Pipeline

+

We saw in the previous LLaVA tutorial how to run vision-language models through tools like text-generation-webui and llama.cpp. In a similar vein to the SLM page on Small Language Models, here we'll explore optimizing VLMs for reduced memory usage and higher performance that reaches interactive levels (like in Live Llava). These are great for fitting on Orin Nano and increasing the framerate.

+

There are 3 model families currently supported: Llava, VILA, and Obsidian (mini VLM).

+

VLM Benchmarks

+ + +

This FPS measures the end-to-end pipeline performance for continuous streaming like with Live Llava (on a yes/no question).

+ + +

Multimodal Chat

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 6 (L4T r36)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 22GB for nano_llm container image
    • +
    • Space for models (>10GB)
    • +
    +
  6. +
  7. +

    Supported VLM models in NanoLLM:

    + +
  8. +
+
+

The optimized NanoLLM library uses MLC/TVM for quantization and inference, which provides the highest performance. It efficiently manages the CLIP embeddings and KV cache. You can find Python code for the chat program used in this example here.

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.chat --api=mlc \
+    --model Efficient-Large-Model/VILA1.5-3b \
+    --max-context-len 256 \
+    --max-new-tokens 32
+
+

This starts an interactive console-based chat with Llava, and on the first run the model will automatically be downloaded from HuggingFace and quantized using MLC and W4A16 precision (which can take some time). See here for command-line options.

+

You'll end up at a >> PROMPT: in which you can enter the path or URL of an image file, followed by your question about the image. You can follow up with multiple questions about the same image. Llava does not understand multiple images in the same chat, so when changing images, first reset the chat history by entering clear or reset as the prompt. VILA supports multiple images (an area of active research).

+

Automated Prompts

+

During testing, you can specify prompts on the command-line that will run sequentially:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.chat --api=mlc \
+    --model Efficient-Large-Model/VILA1.5-3b \
+    --max-context-len 256 \
+    --max-new-tokens 32 \
+    --prompt '/data/images/hoover.jpg' \
+    --prompt 'what does the road sign say?' \
+    --prompt 'what kind of environment is it?' \
+    --prompt 'reset' \
+    --prompt '/data/images/lake.jpg' \
+    --prompt 'please describe the scene.' \
+    --prompt 'are there any hazards to be aware of?'
+
+

You can also use --prompt /data/prompts/images.json to run the test sequence, the results of which are in the table below.

+

Results

+ + +

•   The model responses are with 4-bit quantization enabled, and are truncated to 128 tokens for brevity.
+•   These chat questions and images are from /data/prompts/images.json (found in jetson-containers)

+

JSON

+

When prompted, these models can also output in constrained JSON formats (which the LLaVA authors cover in their LLaVA-1.5 paper), and can be used to programmatically query information about the image:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.chat --api=mlc \
+    --model liuhaotian/llava-v1.5-13b \
+    --prompt '/data/images/hoover.jpg' \
+    --prompt 'extract any text from the image as json'
+
+{
+  "sign": "Hoover Dam",
+  "exit": "2",
+  "distance": "1 1/2 mile"
+}
+
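Since the JSON arrives as free-form model text, it's worth parsing it defensively before using it programmatically. A small standalone sketch (the extraction helper is our own illustration, not part of NanoLLM):

import json
import re

def extract_json(text):
    # pull the first {...} block out of the model output and parse it
    match = re.search(r'\{.*\}', text, re.DOTALL)
    if match is None:
        raise ValueError("no JSON object found in model output")
    return json.loads(match.group(0))

reply = '{\n  "sign": "Hoover Dam",\n  "exit": "2",\n  "distance": "1 1/2 mile"\n}'
info = extract_json(reply)
print(info["sign"])  # -> Hoover Dam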
+

Web UI

+

To use this through a web browser instead, see the llamaspeak tutorial:

+

+

Live Streaming

+

These models can also be used with the Live Llava agent for continuous streaming - just substitute the desired model name below:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.agents.video_query --api=mlc \
+    --model Efficient-Large-Model/VILA1.5-3b \
+    --max-context-len 256 \
+    --max-new-tokens 32 \
+    --video-input /dev/video0 \
+    --video-output webrtc://@:8554/output
+
+

Then navigate your browser to https://<IP_ADDRESS>:8050 after launching it with your camera. Using Chrome or Chromium is recommended for a stable WebRTC connection, with chrome://flags#enable-webrtc-hide-local-ips-with-mdns disabled.

+

The Live Llava tutorial shows how to enable additional features like vector database integration, image tagging, and RAG.

+
+ +
+ +

Video Sequences

+

The VILA-1.5 family of models can understand multiple images per query, enabling video search/summarization, action & behavior analysis, change detection, and other temporal-based vision functions. By manipulating the KV cache and dropping off the last frame from the chat history, we can keep the stream rolling continuously beyond the maximum context length of the model. The vision/video.py example shows how to use this:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.vision.video \
+    --model Efficient-Large-Model/VILA1.5-3b \
+    --max-images 8 \
+    --max-new-tokens 48 \
+    --video-input /data/my_video.mp4 \
+    --video-output /data/my_output.mp4 \
+    --prompt 'What changes occurred in the video?'
+
+ +

Note: web UI support for continuous multi-image queries on video sequences is a work in progress.

+

Python Code

+

For a simplified code example of doing live VLM streaming from Python, see here in the NanoLLM docs.

+ + +

You can use this to implement customized prompting techniques and integrate with other vision pipelines. This code applies the same set of prompts to the latest image from the video feed. See here for the version that does multi-image queries on video sequences.

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_nanodb.html b/tutorial_nanodb.html new file mode 100644 index 00000000..4b6d12e1 --- /dev/null +++ b/tutorial_nanodb.html @@ -0,0 +1,1891 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + NanoDB - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - NanoDB

+

Let's run NanoDB's interactive demo to see the impact of a vector database that handles multimodal data.

+

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 16GB for container image
    • +
    • 40GB for MS COCO dataset
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Download COCO

+

Just as an example, let's use the MS COCO dataset:

+
cd jetson-containers
+mkdir -p data/datasets/coco/2017
+cd data/datasets/coco/2017
+
+wget http://images.cocodataset.org/zips/train2017.zip
+wget http://images.cocodataset.org/zips/val2017.zip
+wget http://images.cocodataset.org/zips/unlabeled2017.zip
+
+unzip train2017.zip
+unzip val2017.zip
+unzip unlabeled2017.zip
+
+

Download Index

+

You can download a pre-built NanoDB index that was already prepared over the COCO dataset from here:

+
cd jetson-containers/data
+wget https://nvidia.box.com/shared/static/icw8qhgioyj4qsk832r4nj2p9olsxoci.gz -O nanodb_coco_2017.tar.gz
+tar -xzvf nanodb_coco_2017.tar.gz
+
+

This allows you to skip the indexing process in the next step and jump to starting the Web UI.

+

Indexing Data

+

If you didn't download the pre-built NanoDB index for COCO above, you can build the index by scanning your dataset directory:

+
jetson-containers run $(autotag nanodb) \
+  python3 -m nanodb \
+    --scan /data/datasets/coco/2017 \
+    --path /data/nanodb/coco/2017 \
+    --autosave --validate 
+
+

This will take a few hours on AGX Orin. Once the database has loaded and completed any start-up operations, it will drop down to a > prompt from which you can run search queries. You can quickly check the operation by typing a query at this prompt:

+
> a girl riding a horse
+
+* index=80110   /data/datasets/coco/2017/train2017/000000393735.jpg      similarity=0.29991915822029114
+* index=158747  /data/datasets/coco/2017/unlabeled2017/000000189708.jpg  similarity=0.29254037141799927
+* index=123846  /data/datasets/coco/2017/unlabeled2017/000000026239.jpg  similarity=0.292171448469162
+* index=127338  /data/datasets/coco/2017/unlabeled2017/000000042508.jpg  similarity=0.29118549823760986
+* index=77416   /data/datasets/coco/2017/train2017/000000380634.jpg      similarity=0.28964102268218994
+* index=51992   /data/datasets/coco/2017/train2017/000000256290.jpg      similarity=0.28929752111434937
+* index=228640  /data/datasets/coco/2017/unlabeled2017/000000520381.jpg  similarity=0.28642547130584717
+* index=104819  /data/datasets/coco/2017/train2017/000000515895.jpg      similarity=0.285491943359375
+
+
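The similarity column above comes from comparing the query's embedding against each image's embedding - to our understanding these are CLIP embeddings scored with cosine similarity, which is why well-matched pairs land around 0.3 rather than 1.0. The underlying math is just a normalized dot product; here's an illustrative NumPy sketch with stand-in vectors:

import numpy as np

def cosine_similarity(a, b):
    # normalized dot product of two embedding vectors
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# stand-in embeddings; in NanoDB these come from the CLIP text/image encoders
text_emb = np.random.randn(768)
image_emb = np.random.randn(768)
print(cosine_similarity(text_emb, image_emb))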

You can press Ctrl+C to exit. For more info about the various options available, see the NanoDB container documentation.

+

Interactive Web UI

+

Spin up the Gradio server:

+
jetson-containers run $(autotag nanodb) \
+  python3 -m nanodb \
+    --path /data/nanodb/coco/2017 \
+    --server --port=7860
+
+

Then navigate your browser to http://<IP_ADDRESS>:7860, and you can enter text search queries as well as drag/upload images:

+ +
+

To use the dark theme, navigate to http://<IP_ADDRESS>:7860/?__theme=dark instead

+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_nanoowl.html b/tutorial_nanoowl.html new file mode 100644 index 00000000..cd54f1d9 --- /dev/null +++ b/tutorial_nanoowl.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_nanosam.html b/tutorial_nanosam.html new file mode 100644 index 00000000..f0e8988a --- /dev/null +++ b/tutorial_nanosam.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_ollama.html b/tutorial_ollama.html new file mode 100644 index 00000000..2966dfc2 --- /dev/null +++ b/tutorial_ollama.html @@ -0,0 +1,1818 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + ollama - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - Ollama

+

Ollama is a popular LLM tool that's easy to get started with, and includes a built-in model library of pre-quantized weights that will automatically be downloaded and run using llama.cpp underneath for inference. The ollama container was compiled with CUDA support.

+

+ +

Ollama Server

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 7GB for ollama container image
    • +
    • Space for models (>5GB)
    • +
    +
  6. +
+
+
# models cached under jetson-containers/data
+jetson-containers run --name ollama $(autotag ollama)
+
+# models cached under your user's home directory
+docker run --runtime nvidia --rm --network=host -v ~/ollama:/ollama -e OLLAMA_MODELS=/ollama dustynv/ollama:r36.2.0
+
+

Running either of these will start the local Ollama server as a daemon in the background. It will save the models it downloads under your mounted jetson-containers/data/models/ollama directory (or another directory that you override with OLLAMA_MODELS)

+

Ollama Client

+

Start the Ollama command-line chat client with your desired model (for example: llama3, phi3, mistral)

+
# if running inside the same container as launched above
+/bin/ollama run phi3
+
+# if launching a new container for the client in another terminal
+jetson-containers run $(autotag ollama) /bin/ollama run phi3
+
+

Or you can install Ollama's binaries for arm64 outside of the container (without CUDA, which only the server needs)

+
# download the latest ollama release for arm64 into /bin
+sudo wget https://github.com/ollama/ollama/releases/download/$(git ls-remote --refs --sort="version:refname" --tags https://github.com/ollama/ollama | cut -d/ -f3- | sed 's/-rc.*//g' | tail -n1)/ollama-linux-arm64 -O /bin/ollama
+sudo chmod +x /bin/ollama
+
+# use the client like normal outside container
+/bin/ollama run phi3
+
+
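The Ollama server also exposes a REST API (on port 11434 by default), so other machines or applications on your network can query it without the CLI. For example, from Python with the requests package (assuming the phi3 model has already been pulled as above):

import requests

# non-streaming completion request against the local Ollama server
resp = requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "phi3", "prompt": "Why is the sky blue?", "stream": False},
)
print(resp.json()["response"])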

Open WebUI

+

To run an Open WebUI server for client browsers to connect to, use the open-webui container:

+
docker run -it --rm --network=host --add-host=host.docker.internal:host-gateway ghcr.io/open-webui/open-webui:main
+
+

You can then navigate your browser to http://JETSON_IP:8080, and create a local account to log in (these credentials are only stored locally)

+

+

Ollama uses llama.cpp for inference, for which various API benchmarks and comparisons are provided on the Llava page. It gets roughly half the peak performance of faster APIs like NanoLLM, but is generally considered fast enough for text chat.

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_sam.html b/tutorial_sam.html new file mode 100644 index 00000000..84638f13 --- /dev/null +++ b/tutorial_sam.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_slm.html b/tutorial_slm.html new file mode 100644 index 00000000..b86f4d46 --- /dev/null +++ b/tutorial_slm.html @@ -0,0 +1,1857 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Small LLM (SLM) - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - Small Language Models (SLM)

+

Small Language Models (SLMs) represent a growing class of language models that have <7B parameters - for example StableLM, Phi-2, and Gemma-2B. Their smaller memory footprint and faster performance make them good candidates for deploying on Jetson Orin Nano. Some are very capable, with abilities at a level similar to the larger models, having been trained on high-quality curated datasets.

+

+

This tutorial shows how to run optimized SLMs with quantization using the NanoLLM library and MLC/TVM backend. You can run these models through tools like text-generation-webui and llama.cpp as well, just not as fast - and since the focus of SLMs is reduced computational and memory requirements, here we'll use the most optimized path available. Those shown below have been profiled:

+

SLM Benchmarks

+ + + + +
+

•   The HuggingFace Open LLM Leaderboard is a collection of multitask benchmarks including reasoning & comprehension, math, coding, history, geography, etc.
+•   The model's memory footprint includes 4-bit weights and KV cache at full context length (factor in extra for process overhead, library code, etc.)
+•   The Chat Model is the instruction-tuned variant for chatting with in the commands below, as opposed to the base completion model.

+
+
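As a back-of-the-envelope check on those footprints: 4-bit weights take roughly half a byte per parameter, and the fp16 KV cache grows linearly with context length. Here's a sketch of the arithmetic (the layer/head counts below are illustrative placeholders, not measurements of any particular model):

def estimate_memory_gb(params_billion, layers, kv_heads, head_dim, context_len):
    # 4-bit weights: ~0.5 bytes per parameter
    weights = params_billion * 1e9 * 0.5
    # KV cache: K and V tensors, fp16 (2 bytes), per layer/head/position
    kv_cache = 2 * layers * kv_heads * head_dim * context_len * 2
    return (weights + kv_cache) / 1e9

# e.g. a hypothetical 3B model with 32 layers, 32 KV heads of dim 80, 4k context
print(f"{estimate_memory_gb(3, 32, 32, 80, 4096):.2f} GB")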

Based on user interactions, the recommended models to try are stabilityai/stablelm-zephyr-3b and princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT, for having output quality on par with Llama-2-7B and well-optimized neural architectures. These models have also been used as the base for various fine-tunes (for example Nous-Capybara-3B-V1.9) and mini VLMs. Others may not be particularly coherent.

+

Chatting with SLMs

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 22GB for nano_llm container image
    • +
    • Space for models (>5GB)
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

The nano_llm.chat program will automatically download and quantize models from HuggingFace like those listed in the table above:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.chat --api=mlc \
+    --model princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT
+
+
+

•   For models requiring authentication, use --env HUGGINGFACE_TOKEN=<YOUR-ACCESS-TOKEN>
+•   Press Ctrl+C twice in succession to exit (once will interrupt bot output)

+
+

This will enter interactive mode where you chat back and forth using the keyboard (entering reset will clear the chat history)

+

+

Automated Prompts

+

During testing, you can specify prompts on the command-line that will run sequentially:

+
jetson-containers run $(autotag nano_llm) \
+  python3 -m nano_llm.chat --api=mlc \
+    --model stabilityai/stablelm-zephyr-3b \
+    --max-new-tokens 512 \
+    --prompt 'hi, how are you?' \
+    --prompt 'whats the square root of 900?' \
+    --prompt 'can I get a recipe for french onion soup?'
+
+

You can also load JSON files containing prompt sequences, like with --prompt /data/prompts/qa.json (the output of which is below)

+

Results

+ + +

•   The model responses are with 4-bit quantization, and are truncated to 256 tokens for brevity.
+•   These chat questions are from /data/prompts/qa.json (found in jetson-containers)

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_stable-diffusion-xl.html b/tutorial_stable-diffusion-xl.html new file mode 100644 index 00000000..1dfcd23d --- /dev/null +++ b/tutorial_stable-diffusion-xl.html @@ -0,0 +1,1832 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Stable Diffusion XL - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - Stable Diffusion XL

+

Stable Diffusion XL is a newer ensemble pipeline consisting of a base model and refiner that results in significantly enhanced and detailed image generation capabilities. All told, SDXL 1.0 has 6.6 billion model parameters, in comparison to 0.98 billion for the original SD 1.5 model.

+

a robot searching a database for images, nvidia green colors

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices (SDXL requires >= ~13GB memory)

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.8GB for container image
    • +
    • 12.4GB for SDXL models
    • +
    +
  6. +
  7. +

    Have followed the previous stable-diffusion-webui tutorial and have the webserver container running.

    +
  8. +
+
+

Downloading SDXL Models

+

Stable Diffusion XL is supported through AUTOMATIC1111's stable-diffusion-webui with some additional settings. First you need to download the SDXL models to your jetson-containers data directory (which is automatically mounted into the container)

+
# run these outside of container, and replace CONTAINERS_DIR with the path to the jetson-containers repo on your device
+CONTAINERS_DIR=/path/to/your/jetson-containers
+MODEL_DIR=$CONTAINERS_DIR/data/models/stable-diffusion/models/Stable-diffusion/
+
+sudo chown -R $USER $MODEL_DIR
+
+wget -P $MODEL_DIR https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors
+wget -P $MODEL_DIR https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors
+
+
+

It's assumed that you already have the stable-diffusion-webui container and webserver running from the previous tutorial.

+
+

SDXL Settings

+

After the models have finished downloading, click the 🔄 button to refresh the model list in the web UI. Select sd_xl_base_1.0.safetensors from the Stable Diffusion checkpoint drop-down:

+

+

Then under the Generation tab, expand the Refiner section, and select sd_xl_refiner_1.0.safetensors from the drop-down:

+

+

Guidance on other relevant settings:

+
    +
  • Change the width/height to 1024x1024. SDXL works best at higher resolutions, and using 512x512 often results in more simplistic/cartoonish content. Changing image resolutions impacts the actual scene contents of the image, not just the details.
  • +
  • The refiner's Switch at setting dictates the step at which the refiner takes over from the base model. At this point, additional subject content will no longer be added to the scene, but rather its details further refined in the image.
  • +
  • Typical Sampling steps are between 20-40 and Switch at is between 0.7-0.9. This takes experimentation to find the best combination for the characteristics of your desired output.
  • +
  • Extensive negative prompts are not as necessary as they were with SD 1.5 (e.g. low quality, jpeg artifacts, blurry, etc.)
  • +
  • CFG Scale controls how closely the model conforms to your prompt versus how creative it is.
  • +
+

When you get a good image, remember to save your random seed and settings so you can re-generate it later!

+

Results

+

+
+


+photograph of a friendly robot alongside a person climbing a mountain (seed 1576166644, steps 25, switch @ 0.8, cfg scale 15)

+
+
+


+a girl and a boy building a friendly robot in their basement workshop (seed 642273464, steps 25, switch @ 0.9, cfg scale 7)

+
+
+


+small friendly robots playing games with people, in a futuristic Tokyo central park gardens with cherry blossoms and water, coy fish swimming in the water, sunshine (seed 642273464, steps 40, switch @ 0.8, cfg scale 7)

+
+
+


+small friendly robots playing games with people in a futuristic New York City Central Park in autumn, water (seed 642273464, steps 25, switch @ 0.8, cfg scale 7)

+
+
+

Want to explore using Python APIs to run diffusion models directly? See jetson-containers/stable-diffusion.

+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_stable-diffusion.html b/tutorial_stable-diffusion.html new file mode 100644 index 00000000..a7382564 --- /dev/null +++ b/tutorial_stable-diffusion.html @@ -0,0 +1,1832 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Stable Diffusion - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + +

Tutorial - Stable Diffusion

+

Let's run AUTOMATIC1111's stable-diffusion-webui on NVIDIA Jetson to generate images from our prompts!

+

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.8GB for container image
    • +
    • 4.1GB for SD 1.5 model
    • +
    +
  6. +
+
+

Setup a container for stable-diffusion-webui

+

The jetson-containers project provides pre-built Docker images for stable-diffusion-webui. You can clone the repo to use its utilities that will automatically pull/start the correct container for you, or you can do it manually.

+
git clone https://github.com/dusty-nv/jetson-containers
+bash jetson-containers/install.sh
+
+
+

Info

+

JetsonHacks provides an informative walkthrough video on jetson-containers, showcasing the usage of both the stable-diffusion-webui and text-generation-webui. You can find the complete article with detailed instructions here.

+

+
+

How to start

+

Use jetson-containers run and autotag tools to automatically pull or build a compatible container image:

+
jetson-containers run $(autotag stable-diffusion-webui)
+
+

The container has a default run command (CMD) that will automatically start the webserver like this:

+
cd /opt/stable-diffusion-webui && python3 launch.py \
+  --data=/data/models/stable-diffusion \
+  --enable-insecure-extension-access \
+  --xformers \
+  --listen \
+  --port=7860
+
+

You should see it downloading the model checkpoint on the first run.

+

Open your browser and access http://<IP_ADDRESS>:7860

+

Results / Output Examples

+

+

+

Stable Diffusion XL

+

To generate even higher-quality and detailed images, check out the next part of the tutorial that uses the latest Stable Diffusion XL models!

+
+

Want to explore using Python APIs to run diffusion models directly? See jetson-containers/stable-diffusion.

+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_tam.html b/tutorial_tam.html new file mode 100644 index 00000000..0aa2157c --- /dev/null +++ b/tutorial_tam.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_text-generation.html b/tutorial_text-generation.html new file mode 100644 index 00000000..e362775c --- /dev/null +++ b/tutorial_text-generation.html @@ -0,0 +1,2068 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + text-generation-webui - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Tutorial - text-generation-webui

+

Interact with a local AI assistant by running a LLM with oobabooga's text-generation-webui on NVIDIA Jetson!

+

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️1

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.2GB for container image
    • +
    • Space for models
    • +
    +
  6. +
+
+

Set up a container for text-generation-webui

+

The jetson-containers project provides pre-built Docker images for text-generation-webui along with all of the loader APIs built with CUDA enabled (llama.cpp, ExLlama, AutoGPTQ, Transformers, etc.). You can clone the repo to use its utilities that will automatically pull/start the correct container for you, or you can do it manually.

+
git clone https://github.com/dusty-nv/jetson-containers
+bash jetson-containers/install.sh
+
+
+

Info

+

JetsonHacks provides an informative walkthrough video on jetson-containers, showcasing the usage of both the stable-diffusion-webui and text-generation-webui. You can find the complete article with detailed instructions here.

+

+
+

How to start

+

Use jetson-containers run and autotag tools to automatically pull or build a compatible container image:

+
jetson-containers run $(autotag text-generation-webui)
+
+

The container has a default run command (CMD) that will automatically start the webserver like this:

+
cd /opt/text-generation-webui && python3 server.py \
+  --model-dir=/data/models/text-generation-webui \
+  --chat \
+  --listen
+
+

Open your browser and access http://<IP_ADDRESS>:7860.

+

Download a model on web UI

+

See the oobabooga documentation for instructions for downloading models - either from within the web UI, or using download-model.py

+
jetson-containers run --workdir=/opt/text-generation-webui $(./autotag text-generation-webui) /bin/bash -c \
+  'python3 download-model.py --output=/data/models/text-generation-webui TheBloke/Llama-2-7b-Chat-GPTQ'
+
+

From within the web UI, select Model tab and navigate to "Download model or LoRA" section.

+

You can find text generation models on Hugging Face Hub, then enter the Hugging Face username/model path (which you can have copied to your clipboard from the Hub). Then click the Download button.

+

GGUF models

+

The fastest oobabooga model loader to use is currently llama.cpp with 4-bit quantized GGUF models.

+

You can download a single model file for a particular quantization, like *.Q4_K_M.gguf. Input the file name and hit the Download button.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ModelQuantizationMemory (MB)
TheBloke/Llama-2-7b-Chat-GGUFllama-2-7b-chat.Q4_K_M.gguf5,268
TheBloke/Llama-2-13B-chat-GGUFllama-2-13b-chat.Q4_K_M.gguf8,609
TheBloke/LLaMA-30b-GGUFllama-30b.Q4_K_S.gguf19,045
TheBloke/Llama-2-70B-chat-GGUFllama-2-70b-chat.Q4_K_M.gguf37,655
+

+
+
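Outside of the web UI, the same GGUF files can also be loaded directly with the llama-cpp-python bindings. A minimal sketch, assuming the package is installed and the model path points at a file downloaded as above:

from llama_cpp import Llama

# adjust the path to the GGUF file you downloaded
llm = Llama(
    model_path="/data/models/text-generation-webui/llama-2-7b-chat.Q4_K_M.gguf",
    n_gpu_layers=128,  # offload all layers to the GPU, as in the web UI setting
)

out = llm("Q: What is the capital of France? A:", max_tokens=32)
print(out["choices"][0]["text"])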

Info

+

Model selection for Jetson Orin Nano

+

Jetson Orin Nano Developer Kit has only 8GB RAM for both CPU (system) and GPU, so you need to pick a model that fits in the RAM size - see the Model Size section below. The 7B models with 4-bit quantization are the ones to use on Jetson Orin Nano. Make sure you go through the RAM optimization steps before attempting to load such a model on Jetson Orin Nano.

+
+

Load a model

+

After you have downloaded a model, click the 🔄 button to refresh your model list, and select the model you want to use.

+

For a GGUF model, remember to

+
    +
  • Set n-gpu-layers to 128
  • +
  • Set n_gqa to 8 if you are using Llama-2-70B (on Jetson AGX Orin 64GB)
  • +
+

Then click the Load button.

+

Chat Template

+

If you're using a Llama model fine-tuned for chat, like the models listed above (except for LLaMA-30b), you need to use the oobabooga Instruct mode and set the template. On the Parameters tab, go to the Instruction Template sub-tab, then select Llama-v2 from the Instruction Template drop-down (or Vicuna, Guanaco, etc. if you are using one of those models)

+
+

For the base text completion models (like LLaMA-30b), use the Default or Notebook tab.

+
+

Selecting the right chat template will make sure the model is being prompted correctly - you can also change the system prompt in the Context box to alter the agent's personality and behavior. There are a lot of other settings under the Generation tab, like the maximum length it should output per reply, and token sampling parameters like temperature and top_p for controlling randomness.

+

Then change back to the Chat tab, and under the mode section, make sure Instruct is selected (confusingly, not chat mode). Then you can start chatting with the LLM!

+

Results

+

+

Things to do with your LLM

+

Here are some common test prompts for coding, math, history, etc. You can also ask it about geography, travel, nature, recipes, fixing things, general life advice, and practically everything else. Also, Llama-2 is quite playful and likes to play games to test its logic abilities!

+
>> What games do you like to play?
+
+I'm a large language model, so I can play text-based games and answer questions on a wide variety of topics. Here are some game ideas:
+
+- 20 Questions:  You or I can think of an object, and the other player tries to guess what it is by asking up to 20 yes-or-no questions.  The goal is to guess the object in as few questions as possible.
+- Hangman: I can think of a word or phrase and you try to guess it by suggesting letters. Each incorrect guess will bring the hangman one step closer to being complete!
+- Word association: I'll give you a word and you have to come up with a word that is related to it. We can keep playing back and forth as long as you'd like!
+- Trivia: I can provide questions on a wide range of topics, from history to science to pop culture. See how many questions you can answer correctly!
+- Storytelling: I can start telling a story and then stop at a cliffhanger. You can then try to guess what happens next or even take over the storytelling and continue it in your own
+- Jokes: I love to tell jokes and make people laugh with my "Dad humor"!  Knock knock!  *giggles*
+
+

Model size tested

+

With llama.cpp, GGUF model, 4-bit quantization:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Model sizeJetson AGX Orin 64GBJetson AGX Orin 32GBJetson Orin Nano 8GB
70B model
30B model
13B model
7B model
+
+

Want to explore using Python APIs to run LLMs directly?
+See jetson-containers for its LLM related packages and containers.

+
+
+
+
    +
  1. +

    Limited to 7B model (4-bit quantized). 

    +
  2. +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_voicecraft.html b/tutorial_voicecraft.html new file mode 100644 index 00000000..4d041132 --- /dev/null +++ b/tutorial_voicecraft.html @@ -0,0 +1,1815 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + VoiceCraft 🆕 - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - VoiceCraft

+

Let's run VoiceCraft, a model for zero-shot speech editing and text-to-speech in the wild!

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    + JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 15.6 GB for voicecraft container image
    • +
    • Space for models
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

+
jetson-containers run $(autotag voicecraft)
+
+

The container has a default run command (CMD) that will automatically start the Gradio app.

+

Open your browser and access http://<IP_ADDRESS>:7860.

+ + +

Gradio app

+

The VoiceCraft repo comes with a Gradio demo app.

+
    +
  1. Select which model you want to use; I recommend using 330M_TTSEnhanced on the 32GB AGX Orin
  2. +
  3. Click load. If you run it for the first time, models are downloaded from HuggingFace; otherwise they are loaded from the /data folder, where they were saved on previous runs
  4. +
  5. Upload an audio file of your choice (MP3/WAV)
  6. +
  7. Click transcribe; it will use Whisper to get a transcription along with the start/end time of each spoken word
  8. +
  9. Now you can edit the sentence, or use TTS. Click Run to generate output.
  10. +
+

+
+

Warning

+

For TTS it's okay to use only the first few seconds of audio as the prompt, since it consumes a lot of memory. On the 32GB AGX Orin, the maximum length of generated TTS audio is around 16 seconds in headless mode.

+
+

Resources

+

If you want to know how it works under the hood, you can read the following papers:

+
    +
  1. VOICECRAFT: Zero-Shot Speech Editing and Text-to-Speech in the Wild
  2. +
  3. High Fidelity Neural Audio Compression
  4. +
  5. Neural Codec Language Models are Zero-Shot Text to Speech Synthesizers
  6. +
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_whisper.html b/tutorial_whisper.html new file mode 100644 index 00000000..a7856e0b --- /dev/null +++ b/tutorial_whisper.html @@ -0,0 +1,1868 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Whisper - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - Whisper

+

Let's run OpenAI's Whisper, a pre-trained model for automatic speech recognition, on Jetson!

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.1 GB for whisper container image
    • +
    • Space for checkpoints
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

+
jetson-containers run $(autotag whisper)
+
+

The container has a default run command (CMD) that will automatically start the Jupyter Lab server, with SSL enabled.

+

Open your browser and access https://<IP_ADDRESS>:8888.

+
+

Attention

+

Note it is https (not http).

+

An HTTPS (SSL) connection is needed to allow the ipywebrtc widget to access your microphone (for record-and-transcribe.ipynb).

+
+

You will see a warning message like this.

+

+

Press "Advanced" button and then click on "Proceed to (unsafe)" link to proceed to the Jupyter Lab web interface.

+

+
+

The default password for Jupyter Lab is nvidia.

+
+

Run Jupyter notebooks

+

The Whisper repo comes with demo Jupyter notebooks, which you can find under the /notebooks/ directory.

+

jetson-containers also adds a convenient notebook (record-and-transcribe.ipynb) for recording your own audio sample in a Jupyter notebook and running transcription on the recording.

+

+

record-and-transcribe.ipynb

+

This notebook lets you record your own audio sample using your PC's microphone and applies Whisper's medium model to transcribe it.

+

It uses Jupyter notebook/lab's ipywebrtc extension to record an audio sample on your web browser.

+

+
+

Attention

+

When you click the ⏺ button, your web browser may show a pop-up asking you to allow it to use your microphone. Be sure to allow the access.

+

+
+Final check +

Once done, if you click on the "⚠ Not secure" part in the URL bar, you should see something like this.

+

+
+
+

Result

+

Once you go through all the steps, you should see the transcription result as text like this.

+
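If you prefer a script over the notebooks, the same transcription can be done in a few lines with Whisper's Python API inside the container. A minimal sketch - the audio path is a placeholder for your own recording:

import whisper

# the notebook uses the medium model; smaller ones (base, small) load faster
model = whisper.load_model("medium")

result = model.transcribe("/data/audio/my_recording.wav")
print(result["text"])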

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/index.html b/vit/index.html new file mode 100644 index 00000000..0d0624f9 --- /dev/null +++ b/vit/index.html @@ -0,0 +1,1718 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Vision Transformers - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Vision Transformers

+

You can quickly run the following Vision Transformer models and applications on Jetson by using jetson-containers.

+

Efficient ViT

+

+

NanoSAM

+

+

NanoOWL

+

+

SAM

+

+

TAM

+

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_efficientvit.html b/vit/tutorial_efficientvit.html new file mode 100644 index 00000000..4bc9bac7 --- /dev/null +++ b/vit/tutorial_efficientvit.html @@ -0,0 +1,1886 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + EfficientViT - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - EfficientViT

+

Let's run MIT Han Lab's EfficientViT on Jetson!

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 10.9 GB for efficientvit container image
    • +
    • Space for checkpoints
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

+
jetson-containers run $(autotag efficientvit)
+
+

Usage of EfficientViT

+

The official EfficientViT repo shows the complete usage information: https://github.com/mit-han-lab/efficientvit#usage

+

Run example/benchmark

+

Inside the container, a small benchmark script benchmark.py is added under the /opt/efficientvit directory by the jetson-containers build process.

+

It tests EfficientViT-L2-SAM in bounding box mode, so we can use it as an example and verify the output.

+

Download l2.pt model

+
mkdir -p /data/models/efficientvit/sam/
+cd /data/models/efficientvit/sam/
+wget https://huggingface.co/han-cai/efficientvit-sam/resolve/main/l2.pt
+
+
+

The downloaded checkpoint file is stored under the /data/ directory that is mounted from the Docker host.

+
+

Run benchmark script

+
cd /opt/efficientvit
+python3 ./benchmark.py
+
+

At the end you should see a summary like the following.

+
AVERAGE of 2 runs:
+  encoder --- 0.062 sec
+  latency --- 0.083 sec
+Memory consumption :  3419.68 MB
+
+

Check the output/result

+

The output image file (of the last inference result) is stored as /data/benchmarks/efficientvit_sam_demo.png.

+

It is stored under the /data/ directory that is mounted from the Docker host.
+So you can go back to your host machine and check the jetson-containers/data/benchmarks/ directory.

+

You should find the output like this.

+

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_nanoowl.html b/vit/tutorial_nanoowl.html new file mode 100644 index 00000000..af0a35e2 --- /dev/null +++ b/vit/tutorial_nanoowl.html @@ -0,0 +1,1840 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + NanoOWL - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - NanoOWL

+

Let's run NanoOWL, OWL-ViT optimized to run real-time on Jetson with NVIDIA TensorRT.

+

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 7.2 GB for container image
    • +
    • Space for models
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

+
jetson-containers run $(autotag nanoowl)
+
+

How to run the tree prediction (live camera) example

+
    +
  1. +

    Ensure you have a camera device connected

    +
    ls /dev/video*
    +
    +

    If no video device is found, exit from the container and check if you can see a video device on the host side.

    +
  2. +
  3. +

    Launch the demo +

    cd examples/tree_demo
    +python3 tree_demo.py ../../data/owl_image_encoder_patch32.engine
    +

    +
    +

    Info

    +

    If it fails to find or load the TensorRT engine file, build the TensorRT engine for the OWL-ViT vision encoder on your Jetson device.

    +
    python3 -m nanoowl.build_image_encoder_engine \
    +    data/owl_image_encoder_patch32.engine
    +
    +
    +
  4. +
  5. +

    Open your browser to http://<ip address>:7860

    +
  6. +
  7. +

    Type whatever prompt you like to see what works!

    +

    Here are some examples

    +
      +
    • Example: [a face [a nose, an eye, a mouth]]
    • +
    • Example: [a face (interested, yawning / bored)]
    • +
    • Example: (indoors, outdoors)
    • +
    +
  8. +
+

Result

+

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_nanosam.html b/vit/tutorial_nanosam.html new file mode 100644 index 00000000..ed70ce94 --- /dev/null +++ b/vit/tutorial_nanosam.html @@ -0,0 +1,1808 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + NanoSAM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - NanoSAM

+

Let's run NVIDIA's NanoSAM to check out the performance gain by distillation.

+

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.3GB for container image
    • +
    • Space for models
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

+
jetson-containers run $(autotag nanosam)
+
+

Run examples

+

Inside the container, you can move to the /opt/nanosam directory to go through all the examples demonstrated in the repo.

+
cd /opt/nanosam
+
+

To run the "Example 1 - Segment with bounding box":

+
python3 examples/basic_usage.py \
+    --image_encoder="data/resnet18_image_encoder.engine" \
+    --mask_decoder="data/mobile_sam_mask_decoder.engine"
+
+

The result is saved under /opt/nanosam/data/basic_usage_out.jpg.

+

To check it on your host machine, you can copy it into the /data directory of the container, which is mounted from the host.

+
cp data/basic_usage_out.jpg /data/
+
+

Then you can go to your host system, and find the file under jetson-containers/data/basic_usage_out.jpg

+

Results

+

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_sam.html b/vit/tutorial_sam.html new file mode 100644 index 00000000..aa43cafb --- /dev/null +++ b/vit/tutorial_sam.html @@ -0,0 +1,1813 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + SAM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tutorial - SAM (Segment Anything)

+

Let's run Meta's SAM on NVIDIA Jetson.

+

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️1

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.8GB for container image
    • +
    • Space for models
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

+
jetson-containers run $(autotag sam)
+
+

The container has a default run command (CMD) that will automatically start the Jupyter Lab server.

+

Open your browser and access http://<IP_ADDRESS>:8888.

+
+

The default password for Jupyter Lab is nvidia.

+
+

Run Jupyter notebook

+

In Jupyter Lab, navigate to notebooks and open the automatic_mask_generator_example notebook.

+

Create a new cell at the top, insert the model download command below and run the cell.

+
!wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
+
+

Then execute all the cells below Set-up.

+
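For reference, the notebook's core steps boil down to a few calls to the segment_anything API. A condensed sketch using the checkpoint downloaded above (the image path is a placeholder):

import cv2
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
sam.to(device="cuda")
mask_generator = SamAutomaticMaskGenerator(sam)

# SAM expects an RGB uint8 image
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
masks = mask_generator.generate(image)  # list of dicts: 'segmentation', 'area', ...
print(f"generated {len(masks)} masks")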

Results

+

+
+
+
    +
  1. +

    The biggest model, vit_h (2.4GB), may not run due to OOM, but vit_l (1.1GB) runs on Jetson Orin Nano. 

    +
  2. +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_tam.html b/vit/tutorial_tam.html new file mode 100644 index 00000000..b37b6810 --- /dev/null +++ b/vit/tutorial_tam.html @@ -0,0 +1,1873 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + TAM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Tutorial - TAM (Track Anything)

+

Let's run TAM to perform Segment Anything on videos on NVIDIA Jetson.

+

+
+

What you need

+
    +
  1. +

    One of the following Jetson devices:

    +

    Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB)

    +
  2. +
  3. +

    Running one of the following versions of JetPack:

    +

    JetPack 5 (L4T r35.x)

    +
  4. +
  5. +

    Sufficient storage space (preferably with NVMe SSD).

    +
      +
    • 6.8GB for container image
    • +
    • Space for models
    • +
    +
  6. +
  7. +

    Clone and setup jetson-containers:

    +
    git clone https://github.com/dusty-nv/jetson-containers
    +bash jetson-containers/install.sh
    +
    +
  8. +
+
+

How to start

+

Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

+
jetson-containers run $(autotag tam)
+
+

The container has a default run command (CMD) that will automatically start TAM's web server.

+

Open your browser and access http://<IP_ADDRESS>:12212.

+

TAM web UI

+

Check out the official tutorial to learn how to operate the web UI.

+ + +

Results

+ + +

Troubleshooting

+

FileNotFoundError: [Errno 2] No such file or directory: './checkpoints/E2FGVI-HQ-CVPR22.pth'

+

You may find that the TAM app fails to download the checkpoint file E2FGVI-HQ-CVPR22.pth.

+
Downloading checkpoints from Google Drive... tips: If you cannot see the progress bar, please try to download it manuall               and put it in the checkpointes directory. E2FGVI-HQ-CVPR22.pth: https://github.com/MCG-NKU/E2FGVI(E2FGVI-HQ model)
+Access denied with the following error:
+
+        Cannot retrieve the public link of the file. You may need to change
+        the permission to 'Anyone with the link', or have had many accesses. 
+
+You may still be able to access the file from the browser:
+
+         https://drive.google.com/uc?id=10wGdKSUOie0XmCr8SQ2A2FeDe-mfn5w3 
+
+

You can manually download the checkpoint file on your Docker host machine.

+
cd jetson-containers/
+pip install gdown
+source ~/.profile
+gdown https://drive.google.com/uc?id=10wGdKSUOie0XmCr8SQ2A2FeDe-mfn5w3 
+mv E2FGVI-HQ-CVPR22.pth ./data/models/tam/
+
+

Then you can try running the TAM container again.

+
jetson-containers run $(autotag tam)
+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + \ No newline at end of file

For running LLM benchmarks, see the MLC container documentation.