diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..8ad97227 --- /dev/null +++ b/404.html @@ -0,0 +1,1214 @@ + + + + + + + + + + + + + + + + + + + + + NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 00000000..0713cc29 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +www.jetson-ai-lab.com diff --git a/assets/80s-pop.wav b/assets/80s-pop.wav new file mode 100644 index 00000000..30606cb7 Binary files /dev/null and b/assets/80s-pop.wav differ diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.220ee61c.min.js b/assets/javascripts/bundle.220ee61c.min.js new file mode 100644 index 00000000..116072a1 --- /dev/null +++ b/assets/javascripts/bundle.220ee61c.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Ci=Object.create;var gr=Object.defineProperty;var Ri=Object.getOwnPropertyDescriptor;var ki=Object.getOwnPropertyNames,Ht=Object.getOwnPropertySymbols,Hi=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,nn=Object.prototype.propertyIsEnumerable;var rn=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&rn(e,r,t[r]);if(Ht)for(var r of Ht(t))nn.call(t,r)&&rn(e,r,t[r]);return e};var on=(e,t)=>{var r={};for(var n in e)yr.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&Ht)for(var n of Ht(e))t.indexOf(n)<0&&nn.call(e,n)&&(r[n]=e[n]);return r};var Pt=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Pi=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of ki(t))!yr.call(e,o)&&o!==r&&gr(e,o,{get:()=>t[o],enumerable:!(n=Ri(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Ci(Hi(e)):{},Pi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var sn=Pt((xr,an)=>{(function(e,t){typeof xr=="object"&&typeof an!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(xr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function a(O){return!!(O&&O!==document&&O.nodeName!=="HTML"&&O.nodeName!=="BODY"&&"classList"in O&&"contains"in O.classList)}function f(O){var Qe=O.type,De=O.tagName;return!!(De==="INPUT"&&s[Qe]&&!O.readOnly||De==="TEXTAREA"&&!O.readOnly||O.isContentEditable)}function c(O){O.classList.contains("focus-visible")||(O.classList.add("focus-visible"),O.setAttribute("data-focus-visible-added",""))}function u(O){O.hasAttribute("data-focus-visible-added")&&(O.classList.remove("focus-visible"),O.removeAttribute("data-focus-visible-added"))}function p(O){O.metaKey||O.altKey||O.ctrlKey||(a(r.activeElement)&&c(r.activeElement),n=!0)}function m(O){n=!1}function d(O){a(O.target)&&(n||f(O.target))&&c(O.target)}function h(O){a(O.target)&&(O.target.classList.contains("focus-visible")||O.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(O.target))}function v(O){document.visibilityState==="hidden"&&(o&&(n=!0),Y())}function Y(){document.addEventListener("mousemove",N),document.addEventListener("mousedown",N),document.addEventListener("mouseup",N),document.addEventListener("pointermove",N),document.addEventListener("pointerdown",N),document.addEventListener("pointerup",N),document.addEventListener("touchmove",N),document.addEventListener("touchstart",N),document.addEventListener("touchend",N)}function 
B(){document.removeEventListener("mousemove",N),document.removeEventListener("mousedown",N),document.removeEventListener("mouseup",N),document.removeEventListener("pointermove",N),document.removeEventListener("pointerdown",N),document.removeEventListener("pointerup",N),document.removeEventListener("touchmove",N),document.removeEventListener("touchstart",N),document.removeEventListener("touchend",N)}function N(O){O.target.nodeName&&O.target.nodeName.toLowerCase()==="html"||(n=!1,B())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),Y(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var cn=Pt(Er=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(c){return!1}},r=t(),n=function(c){var u={next:function(){var p=c.shift();return{done:p===void 0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(c){return encodeURIComponent(c).replace(/%20/g,"+")},i=function(c){return decodeURIComponent(String(c).replace(/\+/g," "))},s=function(){var c=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof c){var d=this;p.forEach(function(B,N){d.append(N,B)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),c._entries&&(c._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(f,c){typeof f!="string"&&(f=String(f)),c&&typeof c!="string"&&(c=String(c));var u=document,p;if(c&&(e.location===void 0||c!==e.location.href)){c=c.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=c,u.head.appendChild(p);try{if(p.href.indexOf(c)!==0)throw new Error(p.href)}catch(O){throw new Error("URL unable to set base "+c+" due to "+O)}}var m=u.createElement("a");m.href=f,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=f,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!c)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,Y=!0,B=this;["append","delete","set"].forEach(function(O){var Qe=h[O];h[O]=function(){Qe.apply(h,arguments),v&&(Y=!1,B.search=h.toString(),Y=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var N=void 
0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==N&&(N=this.search,Y&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},s=i.prototype,a=function(f){Object.defineProperty(s,f,{get:function(){return this._anchorElement[f]},set:function(c){this._anchorElement[f]=c},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(f){a(f)}),Object.defineProperty(s,"search",{get:function(){return this._anchorElement.search},set:function(f){this._anchorElement.search=f,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(s,{toString:{get:function(){var f=this;return function(){return f.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(f){this._anchorElement.href=f,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(f){this._anchorElement.pathname=f},enumerable:!0},origin:{get:function(){var f={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],c=this._anchorElement.port!=f&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(c?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(f){},enumerable:!0},username:{get:function(){return""},set:function(f){},enumerable:!0}}),i.createObjectURL=function(f){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(f){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er)});var qr=Pt((Mt,Nr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Mt=="object"&&typeof Nr=="object"?Nr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Mt=="object"?Mt.ClipboardJS=r():t.ClipboardJS=r()})(Mt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return Ai}});var s=i(279),a=i.n(s),f=i(370),c=i.n(f),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(T){return!1}}var d=function(T){var E=p()(T);return m("cut"),E},h=d;function v(j){var T=document.documentElement.getAttribute("dir")==="rtl",E=document.createElement("textarea");E.style.fontSize="12pt",E.style.border="0",E.style.padding="0",E.style.margin="0",E.style.position="absolute",E.style[T?"right":"left"]="-9999px";var H=window.pageYOffset||document.documentElement.scrollTop;return E.style.top="".concat(H,"px"),E.setAttribute("readonly",""),E.value=j,E}var Y=function(T,E){var H=v(T);E.container.appendChild(H);var I=p()(H);return m("copy"),H.remove(),I},B=function(T){var E=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},H="";return typeof T=="string"?H=Y(T,E):T instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(T==null?void 0:T.type)?H=Y(T.value,E):(H=p()(T),m("copy")),H},N=B;function O(j){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?O=function(E){return typeof E}:O=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},O(j)}var Qe=function(){var T=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},E=T.action,H=E===void 0?"copy":E,I=T.container,q=T.target,Me=T.text;if(H!=="copy"&&H!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&O(q)==="object"&&q.nodeType===1){if(H==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(H==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Me)return N(Me,{container:I});if(q)return H==="cut"?h(q):N(q,{container:I})},De=Qe;function $e(j){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?$e=function(E){return typeof E}:$e=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},$e(j)}function Ei(j,T){if(!(j instanceof T))throw new TypeError("Cannot call a class as a function")}function tn(j,T){for(var E=0;E0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof I.action=="function"?I.action:this.defaultAction,this.target=typeof I.target=="function"?I.target:this.defaultTarget,this.text=typeof I.text=="function"?I.text:this.defaultText,this.container=$e(I.container)==="object"?I.container:document.body}},{key:"listenClick",value:function(I){var q=this;this.listener=c()(I,"click",function(Me){return q.onClick(Me)})}},{key:"onClick",value:function(I){var q=I.delegateTarget||I.currentTarget,Me=this.action(q)||"copy",kt=De({action:Me,container:this.container,target:this.target(q),text:this.text(q)});this.emit(kt?"success":"error",{action:Me,text:kt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(I){return vr("action",I)}},{key:"defaultTarget",value:function(I){var q=vr("target",I);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(I){return vr("text",I)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(I){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return N(I,q)}},{key:"cut",value:function(I){return h(I)}},{key:"isSupported",value:function(){var I=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof I=="string"?[I]:I,Me=!!document.queryCommandSupported;return q.forEach(function(kt){Me=Me&&!!document.queryCommandSupported(kt)}),Me}}]),E}(a()),Ai=Li},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function s(a,f){for(;a&&a.nodeType!==o;){if(typeof a.matches=="function"&&a.matches(f))return a;a=a.parentNode}}n.exports=s},438:function(n,o,i){var s=i(828);function a(u,p,m,d,h){var v=c.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function f(u,p,m,d,h){return typeof u.addEventListener=="function"?a.apply(null,arguments):typeof m=="function"?a.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return a(v,p,m,d,h)}))}function c(u,p,m,d){return function(h){h.delegateTarget=s(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=f},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var s=Object.prototype.toString.call(i);return i!==void 0&&(s==="[object NodeList]"||s==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var s=Object.prototype.toString.call(i);return s==="[object Function]"}},370:function(n,o,i){var s=i(879),a=i(438);function f(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required 
arguments");if(!s.string(d))throw new TypeError("Second argument must be a String");if(!s.fn(h))throw new TypeError("Third argument must be a Function");if(s.node(m))return c(m,d,h);if(s.nodeList(m))return u(m,d,h);if(s.string(m))return p(m,d,h);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return a(document.body,m,d,h)}n.exports=f},817:function(n){function o(i){var s;if(i.nodeName==="SELECT")i.focus(),s=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var a=i.hasAttribute("readonly");a||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),a||i.removeAttribute("readonly"),s=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var f=window.getSelection(),c=document.createRange();c.selectNodeContents(i),f.removeAllRanges(),f.addRange(c),s=f.toString()}return s}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,s,a){var f=this.e||(this.e={});return(f[i]||(f[i]=[])).push({fn:s,ctx:a}),this},once:function(i,s,a){var f=this;function c(){f.off(i,c),s.apply(a,arguments)}return c._=s,this.on(i,c,a)},emit:function(i){var s=[].slice.call(arguments,1),a=((this.e||(this.e={}))[i]||[]).slice(),f=0,c=a.length;for(f;f{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var rs=/["'&<>]/;Yo.exports=ns;function ns(e){var t=""+e,r=rs.exec(t);if(!r)return t;var n,o="",i=0,s=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],s;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(a){s={error:a}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(s)throw s.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||a(m,d)})})}function a(m,d){try{f(n[m](d))}catch(h){p(i[0][3],h)}}function f(m){m.value instanceof et?Promise.resolve(m.value.v).then(c,u):p(i[0][2],m)}function c(m){a("next",m)}function u(m){a("throw",m)}function p(m,d){m(d),i.shift(),i.length&&a(i[0][0],i[0][1])}}function pn(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof Ee=="function"?Ee(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(s){return new Promise(function(a,f){s=e[i](s),o(a,f,s.done,s.value)})}}function o(i,s,a,f){Promise.resolve(f).then(function(c){i({value:c,done:a})},s)}}function C(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var It=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function Ve(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ie=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var s=this._parentage;if(s)if(this._parentage=null,Array.isArray(s))try{for(var a=Ee(s),f=a.next();!f.done;f=a.next()){var c=f.value;c.remove(this)}}catch(v){t={error:v}}finally{try{f&&!f.done&&(r=a.return)&&r.call(a)}finally{if(t)throw t.error}}else s.remove(this);var u=this.initialTeardown;if(C(u))try{u()}catch(v){i=v instanceof It?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=Ee(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{ln(h)}catch(v){i=i!=null?i:[],v instanceof It?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new It(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)ln(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Ve(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Ve(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Sr=Ie.EMPTY;function jt(e){return e instanceof Ie||e&&"closed"in e&&C(e.remove)&&C(e.add)&&C(e.unsubscribe)}function ln(e){C(e)?e():e.unsubscribe()}var Le={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,s=o.isStopped,a=o.observers;return i||s?Sr:(this.currentObservers=null,a.push(r),new Ie(function(){n.currentObservers=null,Ve(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,s=n.isStopped;o?r.error(i):s&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,n){return new xn(r,n)},t}(F);var xn=function(e){ie(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:Sr},t}(x);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ie(t,e);function t(r,n,o){r===void 
0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var n=this,o=n.isStopped,i=n._buffer,s=n._infiniteTimeWindow,a=n._timestampProvider,f=n._windowTime;o||(i.push(r),!s&&i.push(a.now()+f)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,s=o._buffer,a=s.slice(),f=0;f0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var s=r.actions;n!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Wt);var Sn=function(e){ie(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Dt);var Oe=new Sn(wn);var M=new F(function(e){return e.complete()});function Vt(e){return e&&C(e.schedule)}function Cr(e){return e[e.length-1]}function Ye(e){return C(Cr(e))?e.pop():void 0}function Te(e){return Vt(Cr(e))?e.pop():void 0}function zt(e,t){return typeof Cr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Nt(e){return C(e==null?void 0:e.then)}function qt(e){return C(e[ft])}function Kt(e){return Symbol.asyncIterator&&C(e==null?void 0:e[Symbol.asyncIterator])}function Qt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function zi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Yt=zi();function Gt(e){return C(e==null?void 0:e[Yt])}function Bt(e){return un(this,arguments,function(){var r,n,o,i;return $t(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,et(r.read())];case 3:return n=s.sent(),o=n.value,i=n.done,i?[4,et(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,et(o)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Jt(e){return C(e==null?void 0:e.getReader)}function U(e){if(e instanceof F)return e;if(e!=null){if(qt(e))return Ni(e);if(pt(e))return qi(e);if(Nt(e))return Ki(e);if(Kt(e))return On(e);if(Gt(e))return Qi(e);if(Jt(e))return Yi(e)}throw Qt(e)}function Ni(e){return new F(function(t){var r=e[ft]();if(C(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function qi(e){return new F(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?A(function(o,i){return e(o,i,n)}):de,ge(1),r?He(t):Dn(function(){return new Zt}))}}function Vn(){for(var e=[],t=0;t=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new x}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,f=a===void 0?!0:a;return function(c){var u,p,m,d=0,h=!1,v=!1,Y=function(){p==null||p.unsubscribe(),p=void 0},B=function(){Y(),u=m=void 0,h=v=!1},N=function(){var O=u;B(),O==null||O.unsubscribe()};return y(function(O,Qe){d++,!v&&!h&&Y();var De=m=m!=null?m:r();Qe.add(function(){d--,d===0&&!v&&!h&&(p=$r(N,f))}),De.subscribe(Qe),!u&&d>0&&(u=new rt({next:function($e){return De.next($e)},error:function($e){v=!0,Y(),p=$r(B,o,$e),De.error($e)},complete:function(){h=!0,Y(),p=$r(B,s),De.complete()}}),U(O).subscribe(u))})(c)}}function $r(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function z(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),V(e===_e()),J())}function Xe(e){return{x:e.offsetLeft,y:e.offsetTop}}function Kn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,Oe),l(()=>Xe(e)),V(Xe(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,Oe),l(()=>rr(e)),V(rr(e)))}var Yn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var 
n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!Wr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),va?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!Wr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=ba.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Gn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Jn=typeof WeakMap!="undefined"?new WeakMap:new Yn,Xn=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=ga.getInstance(),n=new La(t,r,this);Jn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){Xn.prototype[e]=function(){var t;return(t=Jn.get(this))[e].apply(t,arguments)}});var Aa=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:Xn}(),Zn=Aa;var eo=new x,Ca=$(()=>k(new Zn(e=>{for(let t of e)eo.next(t)}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),X(1));function he(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ye(e){return Ca.pipe(S(t=>t.observe(e)),g(t=>eo.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(()=>he(e)))),V(he(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var to=new x,Ra=$(()=>k(new IntersectionObserver(e=>{for(let t of e)to.next(t)},{threshold:0}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),X(1));function sr(e){return Ra.pipe(S(t=>t.observe(e)),g(t=>to.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function ro(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=he(e),o=bt(e);return r>=o.height-n.height-t}),J())}var cr={drawer:z("[data-md-toggle=drawer]"),search:z("[data-md-toggle=search]")};function no(e){return cr[e].checked}function Ke(e,t){cr[e].checked!==t&&cr[e].click()}function Ue(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),V(t.checked))}function ka(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ha(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(V(!1))}function oo(){let 
e=b(window,"keydown").pipe(A(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:no("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),A(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!ka(n,r)}return!0}),pe());return Ha().pipe(g(t=>t?M:e))}function le(){return new URL(location.href)}function ot(e){location.href=e.href}function io(){return new x}function ao(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)ao(e,r)}function _(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)ao(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function so(){return location.hash.substring(1)}function Dr(e){let t=_("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Pa(e){return L(b(window,"hashchange"),e).pipe(l(so),V(so()),A(t=>t.length>0),X(1))}function co(e){return Pa(e).pipe(l(t=>ce(`[id="${t}"]`)),A(t=>typeof t!="undefined"))}function Vr(e){let t=matchMedia(e);return er(r=>t.addListener(()=>r(t.matches))).pipe(V(t.matches))}function fo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(V(e.matches))}function zr(e,t){return e.pipe(g(r=>r?t():M))}function ur(e,t={credentials:"same-origin"}){return ue(fetch(`${e}`,t)).pipe(fe(()=>M),g(r=>r.status!==200?Ot(()=>new Error(r.statusText)):k(r)))}function We(e,t){return ur(e,t).pipe(g(r=>r.json()),X(1))}function uo(e,t){let r=new DOMParser;return ur(e,t).pipe(g(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),X(1))}function pr(e){let t=_("script",{src:e});return $(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(g(()=>Ot(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),R(()=>document.head.removeChild(t)),ge(1))))}function po(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function lo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(po),V(po()))}function mo(){return{width:innerWidth,height:innerHeight}}function ho(){return b(window,"resize",{passive:!0}).pipe(l(mo),V(mo()))}function bo(){return G([lo(),ho()]).pipe(l(([e,t])=>({offset:e,size:t})),X(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(ee("size")),o=G([n,r]).pipe(l(()=>Xe(e)));return G([r,t,o]).pipe(l(([{height:i},{offset:s,size:a},{x:f,y:c}])=>({offset:{x:s.x-f,y:s.y-c+i},size:a})))}(()=>{function e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(s=>{let a=document.createElement("script");a.src=i,a.onload=s,document.body.appendChild(a)})),Promise.resolve())}var r=class extends EventTarget{constructor(n){super(),this.url=n,this.m=i=>{i.source===this.w&&(this.dispatchEvent(new MessageEvent("message",{data:i.data})),this.onmessage&&this.onmessage(i))},this.e=(i,s,a,f,c)=>{if(s===`${this.url}`){let u=new ErrorEvent("error",{message:i,filename:s,lineno:a,colno:f,error:c});this.dispatchEvent(u),this.onerror&&this.onerror(u)}};let o=document.createElement("iframe");o.hidden=!0,document.body.appendChild(this.iframe=o),this.w.document.open(),this.w.document.write(` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + + + + + + + + + +
+
+ +
+
+ + + +
+
+ + + + +

Benchmarks

+

Large Language Models (LLM)

+ + +

For running LLM benchmarks, see the MLC container documentation.
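The LLM figures on this page are generation throughput, typically reported as output tokens per second. As a rough illustration of how such a number can be measured — this is not the MLC benchmark itself, and the model name below is only a placeholder — here is a minimal sketch using the Hugging Face `transformers` API:

```python
import time
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-2-7b-chat-hf"  # placeholder model; any causal LM works
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = tok("Once upon a time", return_tensors="pt").to("cuda")

start = time.perf_counter()
output = model.generate(**prompt, max_new_tokens=128, do_sample=False)
elapsed = time.perf_counter() - start

new_tokens = output.shape[-1] - prompt["input_ids"].shape[-1]
print(f"~{new_tokens / elapsed:.1f} output tokens/sec (includes prefill time)")
```

Optimized runtimes such as MLC/TVM reach far higher throughput than this naive loop; see the MLC container documentation linked above for the actual benchmark scripts.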

+

Small Language Models (SLM)

+ + +

Small language models are generally defined as having fewer than 7B parameters (Llama-7B shown for reference).
+For more data and info about running these models, see the SLM tutorial and MLC container documentation.

+

Vision Language Models (VLM)

+ + +

This measures the end-to-end pipeline performance for continuous streaming, as with Live Llava.
+For more data and info about running these models, see the NanoVLM tutorial.
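Because this is a pipeline-level number rather than raw model speed, it can be reproduced conceptually with a timing loop around the whole capture-plus-inference path. A generic sketch — the `capture_frame` and `run_vlm` callables are placeholders, not the actual NanoVLM/Live Llava code:

```python
import time

def measure_pipeline_fps(capture_frame, run_vlm, num_frames=100):
    """End-to-end frames/sec: frame capture plus one VLM query per frame."""
    start = time.perf_counter()
    for _ in range(num_frames):
        frame = capture_frame()                       # placeholder: grab a camera frame
        _ = run_vlm(frame, "Describe what you see.")  # placeholder: VLM query on that frame
    elapsed = time.perf_counter() - start
    return num_frames / elapsed
```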

+

Vision Transformers (ViT)

+ + +

ViT performance data from [1] [2] [3]

+

Stable Diffusion

+ + +

Riva

+ + +

For running Riva benchmarks, see ASR Performance and TTS Performance.
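As background, speech benchmarks are often expressed as a real-time factor: seconds of audio processed per second of wall-clock time, where values above 1.0 mean faster than real time. A generic sketch of that calculation (the `transcribe` callable is a placeholder, not Riva's client API):

```python
import time

def realtime_factor(transcribe, audio_samples, sample_rate=16000):
    """Seconds of audio handled per second of wall-clock time (>1.0 = faster than real time)."""
    audio_seconds = len(audio_samples) / sample_rate
    start = time.perf_counter()
    transcribe(audio_samples)        # placeholder for the actual ASR (or TTS) call
    elapsed = time.perf_counter() - start
    return audio_seconds / elapsed
```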

+

Vector Database

+ + +

For running vector database benchmarks, see the NanoDB container documentation.
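For intuition about what a vector database benchmark exercises — nearest-neighbor search over embedding vectors — here is a minimal brute-force cosine-similarity search in NumPy. The sizes are placeholders, and this is not the NanoDB implementation, just the reference operation it accelerates:

```python
import numpy as np

def cosine_topk(query, database, k=5):
    """Brute-force top-k cosine-similarity search over row vectors."""
    q = query / np.linalg.norm(query)
    db = database / np.linalg.norm(database, axis=1, keepdims=True)
    scores = db @ q                   # cosine similarity against every stored vector
    top = np.argsort(-scores)[:k]     # indices of the k closest matches
    return top, scores[top]

# placeholder data: 100k embeddings of dimension 512
db = np.random.randn(100_000, 512).astype(np.float32)
q = np.random.randn(512).astype(np.float32)
indices, similarities = cosine_topk(q, db)
print(indices, similarities)
```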

+ + + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/buy.md.bak b/buy.md.bak new file mode 100644 index 00000000..e4d72ec1 --- /dev/null +++ b/buy.md.bak @@ -0,0 +1,21 @@ +--- +hide: + - navigation + - toc +--- + +# Buy + +Get your Jetson developer kit to try generative AI in your own local environment. + +!!! note + + The availability and price of Jetson developer kits vary depending on your region.
Please click "Jetson Store" button to find out the availability on the page next. + + +| | Product | Action | +|-|-|-| +| |
Jetson AGX Orin 64GB Developer Kit
  • AI Perf: 275 TOPS
  • GPU: NVIDIA Ampere architecture with 2048 NVIDIA CUDA cores and 64 tensor cores
  • CPU: 12-core Arm Cortex-A78AE v8.2 64-bit CPU 3MB L2 + 6MB L3
  • Memory: 64GB 256-bit LPDDR5 | 204.8 GB/s
  • Storage: 64GB eMMC 5.1
| Jetson Store | +| |
Jetson AGX Orin Developer Kit
  • AI Perf: 275 TOPS
  • GPU: NVIDIA Ampere architecture with 2048 NVIDIA CUDA cores and 64 tensor cores
  • CPU: 12-core Arm Cortex-A78AE v8.2 64-bit CPU 3MB L2 + 6MB L3
  • Memory: 32GB 256-bit LPDDR5 | 204.8 GB/s
  • Storage: 64GB eMMC 5.1
| Jetson Store | +| |
Jetson Orin Nano Developer Kit
  • AI Perf: 40 TOPS
  • GPU: 1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores
  • CPU: 6-core Arm® Cortex®-A78AE v8.2 64-bit CPU 1.5MB L2 + 4MB L3
  • Memory: 8GB 128-bit LPDDR5 68 GB/s
  • Storage: SD Card Slot & external NVMe via M.2 Key M
| Jetson Store | + diff --git a/community_articles.html b/community_articles.html new file mode 100644 index 00000000..83c60682 --- /dev/null +++ b/community_articles.html @@ -0,0 +1,1656 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Community Projects - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + + + + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Community Projects

+

Below, you'll find a collection of guides, tutorials, and articles contributed by the community showcasing the implementation of generative AI on the Jetson platform.

+

GitHub Japanese NMT Translation for Stable Diffusion (2-23-2024)

+

Toshihiko Aoki has created a prompt generator for stable-diffusion-webui that translates Japanese queries into English using a fine-tuned GPT-2 NMT model before feeding them into Stable Diffusion. Check out the full guide on GitHub under to-aoki/ja-tiny-sd-webui, including the training dataset and LoRA building!

+ + +

GitHub JetBot Voice to Action Tools: Empowering Your ROS2 Robot with Voice Control (2-17-2024)

+

Jen Hung Ho created ROS2 nodes for ASR/TTS on Jetson Nano that can be used to control JetBot, including customizable voice commands and the execution of advanced actions. Check it out on GitHub under Jen-Hung-Ho/ros2_jetbot_tools and Jen-Hung-Ho/ros2_jetbot_voice and on the forums here.

+ + +

Hackster ClearWater: Underwater Image Enhancement with Generative AI (2-16-2024)

+

Vy Pham has created a novel denoising pipeline using a custom-trained Transformer-based diffusion model and GAN upscaler for image enhancement, running on Jetson AGX Orin. It runs interactively in a Streamlit web UI for capturing photos and processing images and videos. Great work!

+ + +

Hackster AI-Powered Application for the Blind and Visually Impaired (12-13-2023)

+

Nurgaliyev Shakhizat demonstrates a locally-hosted Blind Assistant Device running on Jetson AGX Orin 64GB Developer Kit for realtime image-to-speech translation:

+

+
+

  Find more resources about this project here: [Hackster] [GitHub]

+
+

Dave's Armoury Bringing GLaDOS to life with Robotics and AI (2-8-2024)

+

See how DIY robotics legend Dave Niewinski from davesarmoury.com brings GLaDOS to life using Jetson AGX Orin, running LLMs onboard alongside object + depth tracking, and RIVA ASR/TTS with a custom-trained voice model for speech recognition and synthesis! The build uses a Unitree Z1 arm with 3D printing and a StereoLabs ZED 2 camera.

+ + +
+

  Find more resources about this project here: [Forums] [GitHub]

+
+

Hackster Seeed Studio's Local Voice Chatbot Puts a Speech-Recognizing LLaMa-2 LLM on Your Jetson (2-7-2024)

+

Seeed Studio has announced the launch of the Local Voice Chatbot, an NVIDIA Riva- and LLaMa-2-based large language model (LLM) chatbot with voice recognition capabilities — running entirely locally on NVIDIA Jetson devices, including the company's own reComputer range. Follow the step-by-step guide on the Seeed Studio wiki.

+ + +

YouTube GenAI Nerds React - Insider Look at NVIDIA's Newest Generative AI (2-6-2024)

+

Watch this panel about the latest trends & tech in edge AI, featuring Kerry Shih from OStream, Jim Benson from JetsonHacks, and Dusty from NVIDIA.

+ + +

NVIDIA Bringing Generative AI to Life with NVIDIA Jetson (11-7-2023)

+

Watch this webinar about deploying LLMs, VLMs, ViTs, and vector databases onboard Jetson Orin for building next-generation applications using Generative AI:

+ + +

JetsonHacks Jetson AI Labs – Generative AI Playground (10-31-2023)

+

JetsonHacks publishes an insightful video that walks developers through the typical steps for running generative AI models on Jetson following this site's tutorials. The video shows the interaction with the LLaVA model.

+ + +

Hackster Vision2Audio - Giving the blind an understanding through AI (10-15-2023)

+

Nurgaliyev Shakhizat demonstrates Vision2Audio running on Jetson AGX Orin 64GB Developer Kit to harness the power of LLaVA to help visually impaired people:

+

+

NVIDIA Generative AI Models at the Edge (10-19-2023)

+

Follow this walkthrough of the Jetson AI Lab tutorials along with coverage of the latest features and advances coming to JetPack 6 and beyond:

+ + +
+

  Technical Blog - https://developer.nvidia.com/blog/bringing-generative-ai-to-life-with-jetson/

+
+

Medium How to set up your Jetson device for LLM inference and fine-tuning (10-02-2023)

+

Michael Yuan's guide demonstrates how to set up the Jetson AGX Orin 64GB Developer Kit specifically for large language model (LLM) inference, highlighting the crucial role of GPUs and the cost-effectiveness of the Jetson AGX Orin for LLM tasks.

+
+

  https://medium.com/@michaelyuan_88928/how-to-set-up-your-jetson-device-for-llm-inference-and-fine-tuning-682e36444d43

+
+

Hackster Getting Started with AI on Nvidia Jetson AGX Orin Dev Kit (09-16-2023)

+

Nurgaliyev Shakhizat demonstrates llamaspeak on Jetson AGX Orin 64GB Developer Kit in this Hackster post:

+

+

Hackster New AI Tool Is Generating a Lot of Buzz (09-13-2023)

+

Nick Bild provides an insightful introduction to the Jetson Generative AI Playground:

+
+

  https://www.hackster.io/news/new-ai-tool-is-generating-a-lot-of-buzz-3cc5f23a3598

+
+

JetsonHacks Use These! Jetson Docker Containers Tutorial (09-04-2023)

+

JetsonHacks has an in-depth tutorial on how to use jetson-containers, and even shows the text-generation-webui and stable-diffusion-webui containers in action!

+ + +

Hackster LLaMa 2 LLMs w/ NVIDIA Jetson and textgeneration-web-ui (08-17-2023)

+

Paul DeCarlo demonstrates 13B and 70B parameter LLama 2 models running locally on Jetson AGX Orin 64GB Developer Kit in this Hackster post:

+

+

Hackster Running a ChatGPT-Like LLM-LLaMA2 on a Nvidia Jetson Cluster (08-14-2023)

+

Discover how to run a LLaMA-2 7B model on an NVIDIA Jetson cluster in this insightful tutorial by Nurgaliyev Shakhizat:

+

+

JetsonHacks Speech AI on NVIDIA Jetson Tutorial (08-07-2023)

+

JetsonHacks gives a nice introduction to the NVIDIA Riva SDK and demonstrates its automated speech recognition (ASR) capability on the Jetson Orin Nano Developer Kit.

+ + +

Hackster LLM based Multimodal AI w/ Azure Open AI & NVIDIA Jetson (07-12-2023)

+

Learn how to harness the power of Multimodal AI by running Microsoft JARVIS on a Jetson AGX Orin 64GB Developer Kit, enabling a wide range of AI tasks with ChatGPT-like capabilities, image generation, and more, in this comprehensive guide by Paul DeCarlo.

+

+

Hackster How to Run a ChatGPT-Like LLM on NVIDIA Jetson board (06-13-2023)

+

Nurgaliyev Shakhizat explores a voice AI assistant on Jetson using FastChat and VoskAPI.

+

+ + + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/css/colors.css b/css/colors.css new file mode 100644 index 00000000..92c874bb --- /dev/null +++ b/css/colors.css @@ -0,0 +1,315 @@ +[data-md-color-scheme="playful-purple"] { + + --md-accent-fg-color--rgb: 177, 43, 255; /* #b12bff */ + --md-accent-fg-color--hsl: 278, 100%, 58%; + --md-accent-fg-color: hsl(var(--md-accent-fg-color--hsl)); + + --md-accent-fg-color--dark-hsl: 278, 100%, 51%; + --md-accent-fg-color--dark: hsl(var(--md-accent-fg-color--dark-hsl)); + + --md-accent-fg-color--light-rgb: 214, 143, 255; /* D68FFF */ + --md-accent-fg-color--light-hsl: 278, 100%, 78%; + --md-accent-fg-color--light: hsla(var(--md-accent-fg-color--light-hsl), 1.0); + + --md-accent-bg-color--rgb: 255, 255, 255; /* #ffffff */ + --md-accent-bg-color: rgb(var(--md-accent-bg-color--rgb)); + + --md-primary-fg-color: hsl(210, 100%, 40%); + --md-primary-fg-color--light: hsl(210, 100%, 50%); + --md-primary-fg-color--dark: hsl(210, 100%, 20%); + + --md-mermaid-node-bg-color: #e3f5c5; + --md-mermaid-node-fg-color: #518000; + --md-default-fg-color--lightest: #f9fff2; +} + +[data-md-color-scheme="nv-black-green"] { + + --md-typeset-a-color: #000000; + + --md-accent-fg-color--rgb: #76b900; + --md-accent-fg-color--hsl: hsl(82, 100%, 36%); + --md-accent-fg-color: var(--md-accent-fg-color--hsl); + + --md-accent-fg-color--dark-hsl: hsl(82, 100%, 32%); + --md-accent-fg-color--dark: var(--md-accent-fg-color--dark-hsl); + + --md-accent-fg-color--light-rgb: #81BF15; + --md-accent-fg-color--light-hsl: hsla(82, 80%, 42%, 1.0); + --md-accent-fg-color--light: var(--md-accent-fg-color--light-hsl); + + --md-accent-bg-color--rgb: 255, 255, 255; /* #ffffff */ + --md-accent-bg-color: rgb(#000000); + + --md-primary-fg-color: hsl(82, 0%, 10%); + --md-primary-fg-color--light: hsl(82, 0%, 30%); + --md-primary-fg-color--dark: hsl(82, 0%, 0%); + + --md-mermaid-node-bg-color: #e3f5c5; + --md-mermaid-node-fg-color: #518000; + --md-default-fg-color--lightest: #f9fff2; +} + +a.nv-buy-link,.load-buy-link { + text-decoration: none; + background-color: #76b900; + color: #000; + border: none; + cursor: pointer; + line-height: 24px; + font-family: Arial,Helvetica,Sans-Serif!important; + transition: all .2s ease-out; + cursor: pointer; + display: inline-block; + font-weight: 700; + font-size: 1.2em; + padding: .625em 2em; +} + +a.nv-buy-link:hover,.load-buy-link:hover { + background-color: #91c733; + color: #000; +} + +.specs-container { + margin-top: 0.6rem; +} + +.specs-container li { + list-style: none; + margin-left: 0!important; + padding: 0!important; +} + +.specs-container ul { + padding: 0; + margin: 0 +} + +.specs { + font-weight: 400; + background: url(https://store.nvidia.com/jetson/store/UL-icon.d0c461d7202592cb.svg); + background-repeat: no-repeat; + background-position: 0 8px; + padding-left: 18px; + padding-bottom: 10px; + font-size: 16px +} + +[data-md-color-scheme="nvgreen"] { + + --md-primary-fg-color: #76B900; + --md-primary-fg-color--light: #C5E1A5; + --md-primary-fg-color--dark: #518000; + --md-accent-fg-color: #004D40; + + --md-mermaid-node-bg-color: #e3f5c5; + --md-mermaid-node-fg-color: #518000; + --md-default-fg-color--lightest: #f9fff2; +} + +[data-md-color-scheme="slate"] { + + --md-primary-fg-color: #76B900; + --md-primary-fg-color--light: #C5E1A5; + --md-primary-fg-color--dark: #518000; + --md-accent-fg-color: #a2ff00; + + --md-mermaid-node-bg-color: #518000; + --md-mermaid-node-fg-color: #e3f5c5; + --md-default-fg-color--lightest: #384e4c; + + --md-hue: 
210; + } + +.numberCircle { + + font: Arial, sans-serif; + + width: 1.5em; + height: 1.5em; + box-sizing: initial; + + display: inline-block; + + background: #76B900; + color: #FFFFFF; + text-align: center; + border-radius: 100%; + + line-height: 1.5em; + box-sizing: content-box; +} + + +.roundSquare { + + font: Arial, sans-serif; + + height: 1.25em; + box-sizing: initial; + + display: inline-block; + + background: #004D40; + border: 0.1em solid #CCCCCC; + color: #FFFFFF; + text-align: center; + border-radius: 0.5em; + + line-height: 1.25em; + box-sizing: content-box; + + padding: 0.2em + +} + +.blobDarkGreen1 { + background: #0066467f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobDarkGreen2 { + background: #0085697f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobDarkGreen3 { + background: #00a48f7f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobDarkGreen4 { + background: #00c5b97f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobDarkGreen5 { + background: #00e6e67f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} + +.blobLightGreen1 { + background: #76b9007f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobLightGreen2 { + background: #7fc33a7f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobLightGreen3 { + background: #95d6767f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobLightGreen4 { + background: #b1e8a97f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobLightGreen5 { + background: #d5f8d87f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} + +.blobPink1 { + background: #ec84d07f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobPink2 { + background: #fe8fbe7f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em +} +.blobPink3 { + background: #ff9fb27f; border-radius: 0.5em; padding: 0.2em; margin: 0.2em; +} + +.blobGitHub { + background: #394657; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobJetsonHacks { + background: #394657; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobDavesArmoury { + background: #f7b700; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobHackster { + background: #2e9fe6; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobMedium { + background: #292929; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobNvidia { + background: #76b900; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.blobYouTube { + background: #FF0000; color: #fff; font-size: 0.8em; border-radius: 0.1em; padding-left: 0.3em; padding-right: 0.3em; margin: 0.2em; +} + +.highlightYellow { + background: #ffc105; + border-radius: 0.5em; + padding: 0.2em +} + +.highlightGreen { + background: #00c753; + border-radius: 0.5em; + padding: 0.2em +} + +.highlightGray { + background: #DDDDDD; + border-radius: 0.5em; + padding: 0.2em; + color: #333333; +} + +.center50 { + display: block; + margin-left: auto; + margin-right: auto; + width: 50%; +} + +.right { + clear: none; + float: right; + width: 50%; + padding: 0rem 0.5rem 0rem 0.5rem ; +} + +.left { + clear: none; + float: left; + width: 50%; + padding: 0rem 0.5rem 0rem 0.5rem ; +} + +.right40 
{ + clear: none; + float: right; + width: 40%; + padding: 0rem 0.5rem 0rem 1.5rem ; +} + +.left40 { + clear: none; + float: left; + width: 40%; + padding: 0rem 1.5rem 0rem 0.5rem ; +} + +.dmark { + position: relative; + display: inline-block; + border-bottom: 1px dotted black; +} + +.dmark .dmarkpopup { + visibility: hidden; + width: 400px; + background-color: #DDDDDD; + color: #fff; + text-align: center; + border-radius: 12px; + padding: 15px; + margin: 5px; + box-shadow: 5px 5px 15px #004D40; + position: absolute; + z-index: 1; +} + +.dmark:hover .dmarkpopup { + visibility: visible; +} + +.md-footer__inner { display: none } diff --git a/css/extra.css b/css/extra.css new file mode 100644 index 00000000..a7d4dfce --- /dev/null +++ b/css/extra.css @@ -0,0 +1,99 @@ +.md-header { + background: #FFFFFF +} + +.md-logo { + display: none!important; +} + +.md-icon { + color: black; +} + +.md-header__title { + color: #000000; + margin-left: 0.6rem!important; +} + +.md-tabs { + background: #000000 +} + +.md-nav__link--active { + font-weight: 900; +} + +/* Bigger width for md-content for main text */ +.md-grid { + max-width: 90%; /* or 100%, if you want to stretch to full-width */ + } + +.md-banner__inner { + font-size: .7rem; + margin: 0.2rem auto; + margin-top: 0.2rem; + margin-right: auto; + margin-bottom: 0.2rem; + margin-left: auto; + padding: 0 0.8rem; +} + +.md-typeset a { + text-decoration: underline; + text-decoration-color: var(--md-accent-fg-color); + text-decoration-thickness: 2px; + text-underline-offset: 0.3125em; +} + +.md-typeset a:hover { + text-decoration: underline; + text-decoration-color: var(--md-primary-fg-color); + text-decoration-thickness: 2px; + text-underline-offset: 0.3125em; +} + +.global-footer__links{ + clear: both; + list-style-type: none!important; + margin: 0 0 4px!important; + padding: 0; +} + +.global-footer__links>li{ + display: inline-block; + margin-left: 5px!important; + margin-right: 5px!important; + margin-bottom: 0px!important; + padding-right: 10px; + position: relative; +} + +.global-footer__links>li:last-child:after { + display: none +} + +.global-footer__links>li>a { + color: #666; + display: inline-block; + font-size: 1pc; + font-weight: 100; + line-height: 24px; + text-decoration: none; +} + +.global-footer__links>li:after{ + background: #666; + content: " "; + height: 14px; + position: absolute; + right: 0; + top: 5px; + width: 1px; +} + +.global-footer__copyright{ + clear: both; + color: #666; + font-size: 9pt; + line-height: 1pc; +} \ No newline at end of file diff --git a/css/nvidia-font.css b/css/nvidia-font.css new file mode 100644 index 00000000..94c8892d --- /dev/null +++ b/css/nvidia-font.css @@ -0,0 +1,63 @@ +/* + ============================================= + NVIDIA Sans fonts + Owner: Vadim Leontjev (vleontjev@nvidia.com) + ============================================= +*/ + + + + +/* Set up for old browsers*/ +@supports not (font-variation-settings: normal) { + @font-face { + font-family: "NVIDIA-NALA"; + src: url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Lt.woff") format("woff"), + url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Lt.woff2") format("woff2"); + font-weight: 300; + font-style: normal; + } + @font-face { + font-family: "NVIDIA-NALA"; + src: url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Rg.woff") format("woff"), + 
url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Rg.woff2") format("woff2"); + font-weight: 400; + font-style: normal; + } + @font-face { + font-family: "NVIDIA-NALA"; + src: url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Md.woff") format("woff"), + url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Md.woff2") format("woff2"); + font-weight: 500; + font-style: normal; + } + @font-face { + font-family: "NVIDIA-NALA"; + src: url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Bd.woff") format("woff"), + url("https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/NVIDIASans_NALA_W_Bd.woff2") format("woff2"); + font-weight: 700; + font-style: normal; + } +} + +/* Set up for modern browsers, all weights */ +@supports (font-variation-settings: normal) { + @font-face { + font-family: 'NVIDIA-NALA'; + src: url('https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/var/NVIDIASansVF_NALA_W_Wght.woff2') format('woff2 supports variations'), + url('https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/var/NVIDIASansVF_NALA_W_Wght.woff2') format('woff2-variations'); + font-weight: 100 1000; + font-stretch: 25% 151%; + font-style: normal; + font-display: swap; + } + @font-face { + font-family: 'NVIDIA-NALA'; + src: url('https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/var/NVIDIASansVF_Wght_NALA_W_Italic.woff2') format('woff2 supports variations'), + url('https://images.nvidia.com/etc/designs/nvidiaGDC/clientlibs_base/fonts/nvidia-sans/NALA/var/NVIDIASansVF_Wght_NALA_W_Italic.woff2') format('woff2-variations'); + font-weight: 100 1000; + font-stretch: 25% 151%; + font-style: italic; + font-display: swap; + } +} \ No newline at end of file diff --git a/getting-started.html b/getting-started.html new file mode 100644 index 00000000..2223ad1c --- /dev/null +++ b/getting-started.html @@ -0,0 +1,1231 @@ + + + + + + + + + + + + + + + + + + + + + Getting started - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Getting started

+ + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hello_ai_world.html b/hello_ai_world.html new file mode 100644 index 00000000..5e11369b --- /dev/null +++ b/hello_ai_world.html @@ -0,0 +1,1253 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Hello AI World - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Hello AI World

+

Hello AI World is an in-depth tutorial series for DNN-based inference and training of image classification, object detection, semantic segmentation, and more. It is built on the jetson-inference library using TensorRT for optimized performance on Jetson.
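As a minimal sketch of what the Hello AI World workflow looks like from Python (assuming the jetson-inference and jetson-utils modules are installed per the project's setup instructions, and using the "googlenet" model and a placeholder image path purely as examples), classifying a single image can be as short as:

import jetson_inference
import jetson_utils

# Load a pre-trained classification network (downloaded and TensorRT-optimized on first run)
net = jetson_inference.imageNet("googlenet")

# Load an image from disk into GPU memory (path is a placeholder)
img = jetson_utils.loadImage("my_image.jpg")

# Run inference and look up the human-readable class description
class_idx, confidence = net.Classify(img)
print(f"recognized as '{net.GetClassDesc(class_idx)}' with {confidence * 100:.1f}% confidence")

See the Hello AI World repository linked below for the full tutorials, including live camera streaming, object detection, and segmentation.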

+

+

It's highly recommended to familiarize yourself with the fundamentals of machine learning and computer vision before diving into the more advanced generative AI topics here on the Jetson AI Lab. Many of these models will also prove useful during your own development.

+

+

+

HELLO AI WORLD >> https://github.com/dusty-nv/jetson-inference

+ + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/images/Chrome_ERR_CERT.png b/images/Chrome_ERR_CERT.png new file mode 100644 index 00000000..81d1841c Binary files /dev/null and b/images/Chrome_ERR_CERT.png differ diff --git a/images/Chrome_ERR_CERT_after_advanced.png b/images/Chrome_ERR_CERT_after_advanced.png new file mode 100644 index 00000000..9cc776a7 Binary files /dev/null and b/images/Chrome_ERR_CERT_after_advanced.png differ diff --git a/images/JON-with-Gcube.png b/images/JON-with-Gcube.png new file mode 100644 index 00000000..590f3b1f Binary files /dev/null and b/images/JON-with-Gcube.png differ diff --git a/images/JON_Gen-AI-panels.png b/images/JON_Gen-AI-panels.png new file mode 100644 index 00000000..498dd306 Binary files /dev/null and b/images/JON_Gen-AI-panels.png differ diff --git a/images/JON_Gen-AI-panels_1024px.jpg b/images/JON_Gen-AI-panels_1024px.jpg new file mode 100644 index 00000000..30458936 Binary files /dev/null and b/images/JON_Gen-AI-panels_1024px.jpg differ diff --git a/images/JON_Gen-AI-panels_1024px.png b/images/JON_Gen-AI-panels_1024px.png new file mode 100644 index 00000000..58251f72 Binary files /dev/null and b/images/JON_Gen-AI-panels_1024px.png differ diff --git a/images/NVIDIA-JetsonOrin-3QTR-Front-Left_800px.png b/images/NVIDIA-JetsonOrin-3QTR-Front-Left_800px.png new file mode 100644 index 00000000..ad9930cd Binary files /dev/null and b/images/NVIDIA-JetsonOrin-3QTR-Front-Left_800px.png differ diff --git a/images/TAM_15s_1080p.mp4 b/images/TAM_15s_1080p.mp4 new file mode 100644 index 00000000..671caa58 Binary files /dev/null and b/images/TAM_15s_1080p.mp4 differ diff --git a/images/TAM_screencast_cat_720p-80pcnt.mp4 b/images/TAM_screencast_cat_720p-80pcnt.mp4 new file mode 100644 index 00000000..bc67c630 Binary files /dev/null and b/images/TAM_screencast_cat_720p-80pcnt.mp4 differ diff --git a/images/TAM_screenshot.png b/images/TAM_screenshot.png new file mode 100644 index 00000000..2f378730 Binary files /dev/null and b/images/TAM_screenshot.png differ diff --git a/images/TAM_screenshot_cat.png b/images/TAM_screenshot_cat.png new file mode 100644 index 00000000..2c35f3f2 Binary files /dev/null and b/images/TAM_screenshot_cat.png differ diff --git a/images/audio-generation-2560x1440.jpg b/images/audio-generation-2560x1440.jpg new file mode 100644 index 00000000..be0afe85 Binary files /dev/null and b/images/audio-generation-2560x1440.jpg differ diff --git a/images/audiocraft_jupyterlab_demo.png b/images/audiocraft_jupyterlab_demo.png new file mode 100644 index 00000000..a585641b Binary files /dev/null and b/images/audiocraft_jupyterlab_demo.png differ diff --git a/images/cube_black.png b/images/cube_black.png new file mode 100644 index 00000000..5e5ed000 Binary files /dev/null and b/images/cube_black.png differ diff --git a/images/cube_green.png b/images/cube_green.png new file mode 100644 index 00000000..d2627f4d Binary files /dev/null and b/images/cube_green.png differ diff --git a/images/cube_white.png b/images/cube_white.png new file mode 100644 index 00000000..c8ed7148 Binary files /dev/null and b/images/cube_white.png differ diff --git a/images/distillation-2560x1440.jpg b/images/distillation-2560x1440.jpg new file mode 100644 index 00000000..3b62a41d Binary files /dev/null and b/images/distillation-2560x1440.jpg differ diff --git a/images/efficientvit_sam_demo.png b/images/efficientvit_sam_demo.png new file mode 100644 index 00000000..fbd0c71d Binary files /dev/null and b/images/efficientvit_sam_demo.png differ diff --git 
a/images/favicon.ico b/images/favicon.ico new file mode 100644 index 00000000..424df872 Binary files /dev/null and b/images/favicon.ico differ diff --git a/images/favicon_black.png b/images/favicon_black.png new file mode 100644 index 00000000..84d37802 Binary files /dev/null and b/images/favicon_black.png differ diff --git a/images/favicon_darkbluebg.png b/images/favicon_darkbluebg.png new file mode 100644 index 00000000..c11099a7 Binary files /dev/null and b/images/favicon_darkbluebg.png differ diff --git a/images/favicon_darkgreen.png b/images/favicon_darkgreen.png new file mode 100644 index 00000000..5bc77934 Binary files /dev/null and b/images/favicon_darkgreen.png differ diff --git a/images/favicon_darkgreenbg.png b/images/favicon_darkgreenbg.png new file mode 100644 index 00000000..beefc9e8 Binary files /dev/null and b/images/favicon_darkgreenbg.png differ diff --git a/images/favicon_navy.png b/images/favicon_navy.png new file mode 100644 index 00000000..fe0e2034 Binary files /dev/null and b/images/favicon_navy.png differ diff --git a/images/graph_llm-text-generation.png b/images/graph_llm-text-generation.png new file mode 100644 index 00000000..41153cc5 Binary files /dev/null and b/images/graph_llm-text-generation.png differ diff --git a/images/graph_llm-text-generation.svg b/images/graph_llm-text-generation.svg new file mode 100644 index 00000000..c453871d --- /dev/null +++ b/images/graph_llm-text-generation.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/images/graph_vit-vision-transformers.svg b/images/graph_vit-vision-transformers.svg new file mode 100644 index 00000000..0daa5681 --- /dev/null +++ b/images/graph_vit-vision-transformers.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/images/graph_vlm-text-generation.svg b/images/graph_vlm-text-generation.svg new file mode 100644 index 00000000..d10efa63 --- /dev/null +++ b/images/graph_vlm-text-generation.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/images/icon_NanoDB_512px.png b/images/icon_NanoDB_512px.png new file mode 100644 index 00000000..505b6d09 Binary files /dev/null and b/images/icon_NanoDB_512px.png differ diff --git a/images/icon_NanoSAM.png b/images/icon_NanoSAM.png new file mode 100644 index 00000000..e9240b8d Binary files /dev/null and b/images/icon_NanoSAM.png differ diff --git a/images/icon_NanoSAM_512px.jpg b/images/icon_NanoSAM_512px.jpg new file mode 100644 index 00000000..19ab6d48 Binary files /dev/null and b/images/icon_NanoSAM_512px.jpg differ diff --git a/images/icon_NanoSAM_512px.png b/images/icon_NanoSAM_512px.png new file mode 100644 index 00000000..335f97fb Binary files /dev/null and b/images/icon_NanoSAM_512px.png differ diff --git a/images/icon_diffusion.png b/images/icon_diffusion.png new file mode 100644 index 00000000..f9cb33e6 Binary files /dev/null and b/images/icon_diffusion.png differ diff --git a/images/icon_diffusion_512px.jpg b/images/icon_diffusion_512px.jpg new file mode 100644 index 00000000..07b1d1df Binary files /dev/null and b/images/icon_diffusion_512px.jpg differ diff --git a/images/icon_diffusion_512px.png b/images/icon_diffusion_512px.png new file mode 100644 index 00000000..2c3dc683 Binary files /dev/null and b/images/icon_diffusion_512px.png differ diff --git a/images/icon_distillation.png b/images/icon_distillation.png new file mode 100644 index 00000000..6051b62e Binary files /dev/null and b/images/icon_distillation.png differ diff --git a/images/icon_text-generation_512px.jpg b/images/icon_text-generation_512px.jpg new file mode 
100644 index 00000000..934b3c9d Binary files /dev/null and b/images/icon_text-generation_512px.jpg differ diff --git a/images/icon_text-generation_512px.png b/images/icon_text-generation_512px.png new file mode 100644 index 00000000..1435ac81 Binary files /dev/null and b/images/icon_text-generation_512px.png differ diff --git a/images/icon_text-generation_iso.png b/images/icon_text-generation_iso.png new file mode 100644 index 00000000..2458ec87 Binary files /dev/null and b/images/icon_text-generation_iso.png differ diff --git a/images/icon_vision-language-model.png b/images/icon_vision-language-model.png new file mode 100644 index 00000000..23a60e2d Binary files /dev/null and b/images/icon_vision-language-model.png differ diff --git a/images/icon_vision-language-model.png:Zone.Identifier b/images/icon_vision-language-model.png:Zone.Identifier new file mode 100644 index 00000000..bbbaf191 --- /dev/null +++ b/images/icon_vision-language-model.png:Zone.Identifier @@ -0,0 +1,3 @@ +[ZoneTransfer] +ZoneId=3 +HostUrl=https://www.figma.com/ diff --git a/images/icon_vision-language-model_512px.jpg b/images/icon_vision-language-model_512px.jpg new file mode 100644 index 00000000..7eb80d37 Binary files /dev/null and b/images/icon_vision-language-model_512px.jpg differ diff --git a/images/icon_vision-language-model_512px.png b/images/icon_vision-language-model_512px.png new file mode 100644 index 00000000..683e0eb6 Binary files /dev/null and b/images/icon_vision-language-model_512px.png differ diff --git a/images/iconnv_audio-generation_512x512.jpg b/images/iconnv_audio-generation_512x512.jpg new file mode 100644 index 00000000..dc6446b2 Binary files /dev/null and b/images/iconnv_audio-generation_512x512.jpg differ diff --git a/images/iconnv_distillation_512x512.jpg b/images/iconnv_distillation_512x512.jpg new file mode 100644 index 00000000..2ffdcd9a Binary files /dev/null and b/images/iconnv_distillation_512x512.jpg differ diff --git a/images/iconnv_image-generation_512x512.jpg b/images/iconnv_image-generation_512x512.jpg new file mode 100644 index 00000000..c4659307 Binary files /dev/null and b/images/iconnv_image-generation_512x512.jpg differ diff --git a/images/iconnv_llamaspeak_512x512.jpg b/images/iconnv_llamaspeak_512x512.jpg new file mode 100644 index 00000000..81a4fe0d Binary files /dev/null and b/images/iconnv_llamaspeak_512x512.jpg differ diff --git a/images/iconnv_nanodb_512x512.jpg b/images/iconnv_nanodb_512x512.jpg new file mode 100644 index 00000000..0e8d1340 Binary files /dev/null and b/images/iconnv_nanodb_512x512.jpg differ diff --git a/images/iconnv_nanosam_512x512.jpg b/images/iconnv_nanosam_512x512.jpg new file mode 100644 index 00000000..2a4e2456 Binary files /dev/null and b/images/iconnv_nanosam_512x512.jpg differ diff --git a/images/iconnv_text-generation_512x512.jpg b/images/iconnv_text-generation_512x512.jpg new file mode 100644 index 00000000..2feebd5c Binary files /dev/null and b/images/iconnv_text-generation_512x512.jpg differ diff --git a/images/iconnv_text-vision_512x512.jpg b/images/iconnv_text-vision_512x512.jpg new file mode 100644 index 00000000..dc04a1f6 Binary files /dev/null and b/images/iconnv_text-vision_512x512.jpg differ diff --git a/images/image-generation-2560x1440.jpg b/images/image-generation-2560x1440.jpg new file mode 100644 index 00000000..2b14d92b Binary files /dev/null and b/images/image-generation-2560x1440.jpg differ diff --git a/images/japanese-stable-diffusion.jpg b/images/japanese-stable-diffusion.jpg new file mode 100644 index 
00000000..66fd4fdf Binary files /dev/null and b/images/japanese-stable-diffusion.jpg differ diff --git a/images/jetson-agx-orin-dev-kit-3qtr-front-right-reverse_800px.png b/images/jetson-agx-orin-dev-kit-3qtr-front-right-reverse_800px.png new file mode 100644 index 00000000..f926cf59 Binary files /dev/null and b/images/jetson-agx-orin-dev-kit-3qtr-front-right-reverse_800px.png differ diff --git a/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png b/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png new file mode 100644 index 00000000..a442c0a1 Binary files /dev/null and b/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png differ diff --git a/images/jon-iso_1200x900.png b/images/jon-iso_1200x900.png new file mode 100644 index 00000000..ca6ad442 Binary files /dev/null and b/images/jon-iso_1200x900.png differ diff --git a/images/llamaspeak-2560x1440.jpg b/images/llamaspeak-2560x1440.jpg new file mode 100644 index 00000000..dc921b5b Binary files /dev/null and b/images/llamaspeak-2560x1440.jpg differ diff --git a/images/m48-document-support-guide-256px-blk.png b/images/m48-document-support-guide-256px-blk.png new file mode 100644 index 00000000..e00c504f Binary files /dev/null and b/images/m48-document-support-guide-256px-blk.png differ diff --git a/images/m48-document-support-guide-256px-grn.png b/images/m48-document-support-guide-256px-grn.png new file mode 100644 index 00000000..b3ffa21e Binary files /dev/null and b/images/m48-document-support-guide-256px-grn.png differ diff --git a/images/m48-document-support-guide-256px-wht.png b/images/m48-document-support-guide-256px-wht.png new file mode 100644 index 00000000..2f158112 Binary files /dev/null and b/images/m48-document-support-guide-256px-wht.png differ diff --git a/images/m48-jetson-nano-256px-blk.png b/images/m48-jetson-nano-256px-blk.png new file mode 100644 index 00000000..8fa652de Binary files /dev/null and b/images/m48-jetson-nano-256px-blk.png differ diff --git a/images/m48-jetson-nano-256px-grn.png b/images/m48-jetson-nano-256px-grn.png new file mode 100644 index 00000000..5fb0097e Binary files /dev/null and b/images/m48-jetson-nano-256px-grn.png differ diff --git a/images/m48-jetson-nano-256px-wht.png b/images/m48-jetson-nano-256px-wht.png new file mode 100644 index 00000000..0d07589c Binary files /dev/null and b/images/m48-jetson-nano-256px-wht.png differ diff --git a/images/minigpt4_gleaners.gif b/images/minigpt4_gleaners.gif new file mode 100644 index 00000000..484d2994 Binary files /dev/null and b/images/minigpt4_gleaners.gif differ diff --git a/images/mmj_android.jpg b/images/mmj_android.jpg new file mode 100644 index 00000000..92c69a0a Binary files /dev/null and b/images/mmj_android.jpg differ diff --git a/images/mmj_app.gif b/images/mmj_app.gif new file mode 100644 index 00000000..71f6f5db Binary files /dev/null and b/images/mmj_app.gif differ diff --git a/images/mmj_livestream.gif b/images/mmj_livestream.gif new file mode 100644 index 00000000..db02d3d5 Binary files /dev/null and b/images/mmj_livestream.gif differ diff --git a/images/mmj_streamer.png b/images/mmj_streamer.png new file mode 100644 index 00000000..576166af Binary files /dev/null and b/images/mmj_streamer.png differ diff --git a/images/mmj_tutorial.gif b/images/mmj_tutorial.gif new file mode 100644 index 00000000..d1e188d5 Binary files /dev/null and b/images/mmj_tutorial.gif differ diff --git a/images/mmj_vlc.gif b/images/mmj_vlc.gif new file mode 100644 index 00000000..d58662a0 
Binary files /dev/null and b/images/mmj_vlc.gif differ diff --git a/images/mmj_vst.png b/images/mmj_vst.png new file mode 100644 index 00000000..b0645437 Binary files /dev/null and b/images/mmj_vst.png differ diff --git a/images/nano_llm_docs.jpg b/images/nano_llm_docs.jpg new file mode 100644 index 00000000..d024381b Binary files /dev/null and b/images/nano_llm_docs.jpg differ diff --git a/images/nano_llm_docs_chat.jpg b/images/nano_llm_docs_chat.jpg new file mode 100644 index 00000000..634e3618 Binary files /dev/null and b/images/nano_llm_docs_chat.jpg differ diff --git a/images/nanodb-2560x1440.jpg b/images/nanodb-2560x1440.jpg new file mode 100644 index 00000000..a03e9e40 Binary files /dev/null and b/images/nanodb-2560x1440.jpg differ diff --git a/images/nanoowl_chrome_window.png b/images/nanoowl_chrome_window.png new file mode 100644 index 00000000..5000052e Binary files /dev/null and b/images/nanoowl_chrome_window.png differ diff --git a/images/nanosam-2560x1440.jpg b/images/nanosam-2560x1440.jpg new file mode 100644 index 00000000..355ffd29 Binary files /dev/null and b/images/nanosam-2560x1440.jpg differ diff --git a/images/nvicon_llamaspeak_1200x1200.jpg b/images/nvicon_llamaspeak_1200x1200.jpg new file mode 100644 index 00000000..81a4fe0d Binary files /dev/null and b/images/nvicon_llamaspeak_1200x1200.jpg differ diff --git a/images/nvidia-favicon-rgb-16x16px@2x.png b/images/nvidia-favicon-rgb-16x16px@2x.png new file mode 100644 index 00000000..e81621c7 Binary files /dev/null and b/images/nvidia-favicon-rgb-16x16px@2x.png differ diff --git a/images/nvidia_logo_white.png b/images/nvidia_logo_white.png new file mode 100644 index 00000000..777219df Binary files /dev/null and b/images/nvidia_logo_white.png differ diff --git a/images/nvidia_logo_white_60.png b/images/nvidia_logo_white_60.png new file mode 100644 index 00000000..f0f9ac86 Binary files /dev/null and b/images/nvidia_logo_white_60.png differ diff --git a/images/nvidia_logo_white_65.png b/images/nvidia_logo_white_65.png new file mode 100644 index 00000000..9055574b Binary files /dev/null and b/images/nvidia_logo_white_65.png differ diff --git a/images/nvidia_logo_white_70.png b/images/nvidia_logo_white_70.png new file mode 100644 index 00000000..1e73a68c Binary files /dev/null and b/images/nvidia_logo_white_70.png differ diff --git a/images/sam_notebook.png b/images/sam_notebook.png new file mode 100644 index 00000000..afe3a4d5 Binary files /dev/null and b/images/sam_notebook.png differ diff --git a/images/slm_console.gif b/images/slm_console.gif new file mode 100644 index 00000000..db3bb2bb Binary files /dev/null and b/images/slm_console.gif differ diff --git a/images/slm_console_2.gif b/images/slm_console_2.gif new file mode 100644 index 00000000..655023d4 Binary files /dev/null and b/images/slm_console_2.gif differ diff --git a/images/stable-diffusion-webui_green-web.gif b/images/stable-diffusion-webui_green-web.gif new file mode 100644 index 00000000..8db8b840 Binary files /dev/null and b/images/stable-diffusion-webui_green-web.gif differ diff --git a/images/stable-diffusion-xl-central-park.jpg b/images/stable-diffusion-xl-central-park.jpg new file mode 100644 index 00000000..2028dcec Binary files /dev/null and b/images/stable-diffusion-xl-central-park.jpg differ diff --git a/images/stable-diffusion-xl-kids-workshop.jpg b/images/stable-diffusion-xl-kids-workshop.jpg new file mode 100644 index 00000000..dc14a01c Binary files /dev/null and b/images/stable-diffusion-xl-kids-workshop.jpg differ diff --git 
a/images/stable-diffusion-xl-model-select.jpg b/images/stable-diffusion-xl-model-select.jpg new file mode 100644 index 00000000..301e5493 Binary files /dev/null and b/images/stable-diffusion-xl-model-select.jpg differ diff --git a/images/stable-diffusion-xl-refiner-settings.jpg b/images/stable-diffusion-xl-refiner-settings.jpg new file mode 100644 index 00000000..74958b76 Binary files /dev/null and b/images/stable-diffusion-xl-refiner-settings.jpg differ diff --git a/images/stable-diffusion-xl-robot-generation.jpg b/images/stable-diffusion-xl-robot-generation.jpg new file mode 100644 index 00000000..ea8abefd Binary files /dev/null and b/images/stable-diffusion-xl-robot-generation.jpg differ diff --git a/images/stable-diffusion-xl-robot-mountain.jpg b/images/stable-diffusion-xl-robot-mountain.jpg new file mode 100644 index 00000000..0045138b Binary files /dev/null and b/images/stable-diffusion-xl-robot-mountain.jpg differ diff --git a/images/stable-diffusion-xl-robot-terminal.jpg b/images/stable-diffusion-xl-robot-terminal.jpg new file mode 100644 index 00000000..719abcac Binary files /dev/null and b/images/stable-diffusion-xl-robot-terminal.jpg differ diff --git a/images/stable-diffusion-xl-tokyo-gardens.jpg b/images/stable-diffusion-xl-tokyo-gardens.jpg new file mode 100644 index 00000000..a21078cf Binary files /dev/null and b/images/stable-diffusion-xl-tokyo-gardens.jpg differ diff --git a/images/stable-diffusion_space-ferret.png b/images/stable-diffusion_space-ferret.png new file mode 100644 index 00000000..48964e95 Binary files /dev/null and b/images/stable-diffusion_space-ferret.png differ diff --git a/images/text-generation-2560x1440.jpg b/images/text-generation-2560x1440.jpg new file mode 100644 index 00000000..e97a466f Binary files /dev/null and b/images/text-generation-2560x1440.jpg differ diff --git a/images/text-generation-webui_sf-trip.gif b/images/text-generation-webui_sf-trip.gif new file mode 100644 index 00000000..f5c4ed71 Binary files /dev/null and b/images/text-generation-webui_sf-trip.gif differ diff --git a/images/text-vision-2560x1440.jpg b/images/text-vision-2560x1440.jpg new file mode 100644 index 00000000..45293d14 Binary files /dev/null and b/images/text-vision-2560x1440.jpg differ diff --git a/images/tgwui_Download-model.png b/images/tgwui_Download-model.png new file mode 100644 index 00000000..2f0245a0 Binary files /dev/null and b/images/tgwui_Download-model.png differ diff --git a/images/tgwui_llava_drag-n-drop_birds.gif b/images/tgwui_llava_drag-n-drop_birds.gif new file mode 100644 index 00000000..a1030d12 Binary files /dev/null and b/images/tgwui_llava_drag-n-drop_birds.gif differ diff --git a/images/tgwui_model-download-animation.gif b/images/tgwui_model-download-animation.gif new file mode 100644 index 00000000..4e69e8e6 Binary files /dev/null and b/images/tgwui_model-download-animation.gif differ diff --git a/images/tgwui_multimodal_llava_fish.jpg b/images/tgwui_multimodal_llava_fish.jpg new file mode 100644 index 00000000..1bc00b0e Binary files /dev/null and b/images/tgwui_multimodal_llava_fish.jpg differ diff --git a/images/tgwui_multimodal_llava_spacewalk.png b/images/tgwui_multimodal_llava_spacewalk.png new file mode 100644 index 00000000..7db6a4a0 Binary files /dev/null and b/images/tgwui_multimodal_llava_spacewalk.png differ diff --git a/images/whisper_ipywebrtc_widget.png b/images/whisper_ipywebrtc_widget.png new file mode 100644 index 00000000..8c28359b Binary files /dev/null and b/images/whisper_ipywebrtc_widget.png differ diff --git 
a/images/whisper_jupyterlab_notebooks.png b/images/whisper_jupyterlab_notebooks.png new file mode 100644 index 00000000..e6eca2e4 Binary files /dev/null and b/images/whisper_jupyterlab_notebooks.png differ diff --git a/images/whisper_microphone_access.png b/images/whisper_microphone_access.png new file mode 100644 index 00000000..70c87fe8 Binary files /dev/null and b/images/whisper_microphone_access.png differ diff --git a/images/whisper_transcribe_result.png b/images/whisper_transcribe_result.png new file mode 100644 index 00000000..93bf87d2 Binary files /dev/null and b/images/whisper_transcribe_result.png differ diff --git a/images/whisper_web_setting.png b/images/whisper_web_setting.png new file mode 100644 index 00000000..5736db66 Binary files /dev/null and b/images/whisper_web_setting.png differ diff --git a/index.html b/index.html new file mode 100644 index 00000000..68e054b2 --- /dev/null +++ b/index.html @@ -0,0 +1,1857 @@ + + + + + + + + + + + + + + + + + + + + + + + + Home - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + + + + + +
+
+
+
+

Generative AI at the Edge

+

Bring generative AI to the world with NVIDIA® Jetson™

+ + Explore Tutorials + + + Walkthrough + +
+
+ +
+
+
+
+ + +
+ +
+ + + + +
+
+
+ + +
+ + + +
+
+ + + +
+
+ + + +
+ Learn More +
+
+
+ + + + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/overrides/assets/style.css b/overrides/assets/style.css new file mode 100644 index 00000000..13fedc48 --- /dev/null +++ b/overrides/assets/style.css @@ -0,0 +1,879 @@ +:root { + --primary-color: #02007e; + --body-color: #f9f9f9; + --text-color: #636363; + --text-color-dark: #242738; + --white-color: #ffffff; + --light-color: #f8f9fa; + --font-family: Lato; +} + +body { + line-height: 1.5; + font-family: var(--font-family), sans-serif; + -webkit-font-smoothing: antialiased; + font-size: 17px; + color: var(--text-color); + background-color: var(--body-color) +} + +p { + color: var(--text-color); + font-size: 15px +} + +h1, +h2, +h3, +h4, +h5, +h6 { + color: var(--text-color-dark); + font-family: var(--font-family), sans-serif; + font-weight: 700; + line-height: 1.2 +} + +h1, +.h1 { + font-size: 45px +} + +h2, +.h2 { + font-size: 32px +} + +h3, +.h3 { + font-size: 26px +} + +h4, +.h4 { + font-size: 20px +} + +h5, +.h5 { + font-size: 18px +} + +h6, +.h6 { + font-size: 14px +} + +.btn { + font-size: 14px; + font-family: var(--font-family), sans-serif; + text-transform: uppercase; + padding: 16px 44px; + border-radius: 0; + font-weight: 600; + border: 0; + position: relative; + z-index: 1; + transition: .2s ease +} + +.btn:focus { + outline: 0; + box-shadow: none !important +} + +.btn:active { + box-shadow: none +} + +.btn-primary { + background: var(--primary-color); + color: var(--white-color) +} + +.btn-primary:active { + background: var(--primary-color) +} + +.btn-primary:hover { + background: var(--primary-color) +} + +.btn-primary:not(:disabled):not(.disabled).active, +.btn-primary:not(:disabled):not(.disabled):active, +.show>.btn-primary.dropdown-toggle { + color: var(--white-color); + background-color: var(--primary-color); + border-color: var(--primary-color) +} + +.inline-button { + line-height: .8rem !important; + padding: 5px 8px !important; + pointer-events: none; + margin-top: -5px +} + +.overflow-hidden { + overflow: hidden !important +} + +::-moz-selection { + background: var(--primary-color); + color: var(--white-color) +} + +::selection { + background: var(--primary-color); + color: var(--white-color) +} + +.preloader { + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: var(--white-color); + z-index: 999; + display: flex; + align-items: center; + justify-content: center +} + +ul { + list-style-type: none; + margin: 0; + padding-left: 0; + font-size: 15px +} + +ol { + padding-left: 20px; + font-size: 15px +} + +img { + vertical-align: middle; + border: 0 +} + +a, +a:hover, +a:focus { + text-decoration: none; + color: var(--primary-color) +} + +a:hover, +a:focus { + color: var(--primary-color) +} + +a, +button, +select { + cursor: pointer; + transition: .2s ease +} + +a:focus, +button:focus, +select:focus { + outline: 0 +} + +.slick-slide { + outline: 0 +} + +.section { + padding-top: 80px; + padding-bottom: 80px +} + +@media(max-width:768px) { + .section { + padding-top: 60px + } +} + +.section-sm { + padding-top: 60px; + padding-bottom: 60px +} + +@media(max-width:768px) { + .section-sm { + padding-top: 40px + } +} + +.section-title { + margin-bottom: 40px +} + +.bg-cover { + background-size: cover; + background-position: 50%; + background-repeat: no-repeat +} + +.border-primary { + border-color: #f2f2f2 !important +} + +pre { + padding: 20px +} + +.overlay { + position: relative +} + +.overlay::before { + position: absolute; + content: ''; + height: 100%; + width: 100%; + 
top: 0; + left: 0; + background: var(--primary-color); + opacity: .8 +} + +.outline-0 { + outline: 0 !important +} + +.d-unset { + display: unset !important +} + +.bg-primary { + background: var(--primary-color) !important +} + +.bg-white { + background-color: var(--white-color) !important +} + +.bg-light { + background-color: var(--light-color) !important +} + +.text-primary { + color: var(--primary-color) !important +} + +.text-color { + color: var(--text-color) !important +} + +.text-dark { + color: var(--text-color-dark) !important +} + +.text-white { + color: var(--white-color) !important +} + +.top-50 { + top: 50px +} + +.navbar { + padding: 0 +} + +@media(max-width:768px) { + .navbar { + padding: 10px 0 + } +} + +.navbar-brand img { + max-width: 100px; + margin-bottom: 0 +} + +.navbar .nav-item .nav-link { + text-transform: uppercase; + padding: 10px 15px !important; + font-size: 15px +} + +.navbar .dropdown:hover .dropdown-menu { + visibility: visible; + opacity: 1; + -webkit-transform: scaleX(1); + transform: scaleX(1) +} + +.navbar .dropdown-menu { + box-shadow: 0 3px 9px 0 rgba(0, 0, 0, .12); + padding: 15px 0; + border: 0; + top: 40px; + left: -35px; + border-radius: 0; + display: block; + visibility: hidden; + transition: .3s ease; + opacity: 0; + -webkit-transform: translateY(20px); + transform: translateY(20px); + background: var(--white-color) +} + +@media(max-width:768px) { + .navbar .dropdown-menu { + display: none; + opacity: 1; + visibility: visible; + -webkit-transform: translateY(0); + transform: translateY(0); + -webkit-transform-origin: unset; + transform-origin: unset + } +} + +.navbar .dropdown-menu.view { + visibility: visible !important; + opacity: 1; + -webkit-transform: translateY(0); + transform: translateY(0); + -webkit-transform-origin: top; + transform-origin: top +} + +@media(max-width:768px) { + .navbar .dropdown-menu.view { + display: block + } +} + +.navbar .dropdown-menu.show { + visibility: hidden +} + +@media(max-width:768px) { + .navbar .dropdown-menu.show { + visibility: visible; + display: block + } +} + +.navbar .dropdown-item { + position: relative; + color: var(--text-color-dark); + transition: .2s ease; + font-family: var(--font-family), sans-serif +} + +@media(max-width:768px) { + .navbar .dropdown-item { + text-align: center + } +} + +.navbar .dropdown-item:hover { + color: var(--primary-color); + background: 0 0 +} + +.lang-list { + background: var(--primary-color); + color: var(--white-color) +} + +.lang-list.dark { + color: var(--text-color-dark); + background: var(--white-color) +} + +.banner { + overflow: hidden +} + +.banner p { + font-size: 20px; + opacity: .8 +} + +.banner .nav-link.text-dark { + color: var(--white-color) !important +} + +.banner .nav-link.text-dark:hover { + color: var(--white-color) !important +} + +.banner .navbar-brand { + color: var(--white-color) !important +} + +#project-icon { + float: left; + height: 32px; + width: 32px +} + +#project-description { + margin: 0; + padding: 0 +} + +.ui-helper-hidden-accessible { + display: none +} + +.ui-menu { + background: var(--white-color); + padding: 5px 20px 20px; + right: 0 !important; + max-height: 200px; + overflow: hidden; + border-radius: 0 0 25px 25px; + z-index: 9999; + box-shadow: 0 13px 20px 0 rgba(0, 0, 0, .07) +} + +@media(max-width:575px) { + .ui-menu { + width: calc(100% - 30px) !important + } +} + +@media(min-width:576px) { + .ui-menu { + max-width: 510px !important + } +} + +@media(min-width:768px) { + .ui-menu { + max-width: 690px !important + } +} + 
+@media(min-width:992px) { + .ui-menu { + max-width: 610px !important + } +} + +@media(min-width:1200px) { + .ui-menu { + max-width: 730px !important + } +} + +.ui-menu-item a { + color: var(--text-color); + padding: 8px 0; + font-size: 15px +} + +.ui-menu-item a:hover { + color: var(--primary-color) +} + +.ui-menu-item:not(:last-child) { + border-bottom: 1px solid #e8e8e8 +} + +.ui-menu-item * { + display: none +} + +.ui-menu-item .ui-corner-all { + display: block +} + +.form-control { + height: 50px; + border-radius: 25px; + border: 0; + padding: 0 20px +} + +.form-control:focus { + border: 0; + box-shadow: none !important +} + +textarea.form-control { + height: 150px; + padding: 20px +} + +.icon { + font-size: 40px +} + +.shadow { + box-shadow: 0 5px 15px rgba(0, 0, 0, .07) !important; + transition: .3s ease +} + +.shadow-bottom { + box-shadow: 0 1px 0 rgba(12, 13, 14, .1), 0 1px 6px rgba(59, 64, 69, .1) +} + +.shadow:hover, +.shadow:focus { + box-shadow: 0 14px 25px rgba(0, 0, 0, .1) !important +} + +.content * { + margin-bottom: 20px +} + +.content img { + max-width: 100%; + height: auto; + margin: 0 auto 15px; + display: block; + text-align: center +} + +.content ul { + padding-left: 0; + margin-bottom: 20px +} + +.content ul li { + padding-left: 20px; + position: relative +} + +.content ul li::before { + position: absolute; + content: ''; + height: 8px; + width: 8px; + border-radius: 50%; + background: var(--primary-color); + opacity: .3; + left: 0; + top: 8px +} + +.list-styled li { + padding-left: 20px; + position: relative +} + +.list-styled li::before { + position: absolute; + content: ''; + height: 8px; + width: 8px; + border-radius: 50%; + background: var(--primary-color); + opacity: .3; + left: 0; + top: 17px +} + +.post-meta { + color: var(--text-color); + font-style: italic; + font-size: 14px +} + +blockquote { + font-size: 20px !important; + color: var(--text-color-dark); + padding: 20px 40px; + border-left: 2px solid var(--primary-color); + margin: 40px 0; + font-weight: 700; + background: var(--light-color) +} + +blockquote p { + margin-bottom: 0 !important +} + +.pagination { + justify-content: space-between +} + +.pagination a { + color: var(--primary-color) +} + +.pagination i { + font-size: 15px; + line-height: 1.8 +} + +#accordion i { + font-size: 14px; + line-height: 2 +} + +table { + text-align: left; + width: 100%; + max-width: 100%; + margin-bottom: 1rem; + border: 1px solid #dee2e6 +} + +table td, +table th { + padding: .75rem; + vertical-align: top; + border: 1px solid #dee2e6; + margin-bottom: 0 +} + +thead { + background: #ececec; + margin-bottom: 0 +} + +tbody { + background: #f8f8f8; + margin-bottom: 0 +} + +.notices { + margin: 2rem 0; + position: relative +} + +.notices p { + padding: 10px +} + +.notices p::before { + position: absolute; + top: 2px; + color: #fff; + font-family: themify; + font-weight: 900; + content: "\e717"; + left: 10px +} + +.notices.note p { + border-top: 30px solid #6ab0de; + background: #e7f2fa +} + +.notices.note p::after { + content: 'Note'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.tip p { + border-top: 30px solid #78c578; + background: #e6f9e6 +} + +.notices.tip p::after { + content: 'Tip'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.info p { + border-top: 30px solid #f0b37e; + background: #fff2db +} + +.notices.info p::after { + content: 'Info'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.warning p { + border-top: 30px solid 
#e06f6c; + background: #fae2e2 +} + +.notices.warning p::after { + content: 'Warning'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.sidebar { + background-color: var(--white-color); + position: sticky; + top: 50px; + margin-bottom: 30px; + padding: 40px 10px 20px +} + +.sidelist { + display: block +} + +li.sidelist>a { + margin-left: 20px; + margin-bottom: 10px; + display: block; + font-size: 20px +} + +li.sidelist li a { + margin: 0 +} + +.sidelist li.sidelist { + display: block +} + +.sidelist li.sidelist.active a { + color: var(--primary-color) +} + +.sidelist li.sidelist.active::before { + opacity: 1 +} + +.page-list li a { + display: none +} + +.page-list li ul a { + display: block +} + +.sidelist li a { + color: var(--text-color-dark); + display: block; + font-size: 15px; + font-weight: 500; + padding: 10px 0; + line-height: 1.4 +} + +p:empty, +p a:empty { + display: none !important +} + +pre { + display: block; + padding: 9.5px; + margin: 10px 0 +} + +code { + margin-bottom: 0 !important; + font-size: 100% +} + +.back-btn { + position: relative +} + +.back-btn::before { + position: absolute; + font-family: themify; + content: "\e6bc"; + font-size: 25px; + height: 30px; + width: 40px; + background-color: var(--white-color); + color: inherit; + text-align: right; + z-index: 1; + left: -5px; + top: -5px +} + +.ui-autocomplete-input { + border-bottom: 1px solid #d4d4d4 !important +} + +.ui-autocomplete-input.active { + border-bottom-left-radius: 0; + border-bottom-right-radius: 0 +} + +.search-icon { + position: absolute; + right: 20px; + top: 18px; + font-size: 14px +} + +i { + transition: .2s ease +} + +a:hover i { + color: var(--primary-color) +} + +pre code::-webkit-scrollbar { + height: 5px +} + +pre code::-webkit-scrollbar-track { + background: #000 +} + +pre code::-webkit-scrollbar-thumb { + background: #888 +} + +.code-tabs { + border: 1px solid #dee2e6; + overflow: hidden; + margin: 20px 0 +} + +.code-tabs .tab-content { + padding: 20px 15px; + margin-bottom: 0 +} + +.code-tabs .tab-content .tab-pane { + margin-bottom: 0 +} + +.code-tabs .nav-tabs { + margin-bottom: 0 +} + +.code-tabs .nav-tabs .nav-item { + padding-left: 0; + border-right: 1px solid #dee2e6 +} + +.code-tabs .nav-tabs .nav-item .nav-link { + text-decoration: none; + font-weight: 500; + border: 0; + margin-bottom: 0 +} + +.code-tabs .nav-tabs .nav-item::before { + display: none +} + +.code-tabs .nav-tabs .nav-item.active { + background: var(--primary-color) +} + +.code-tabs .nav-tabs .nav-item.active .nav-link { + color: var(--white-color) +} \ No newline at end of file diff --git a/overrides/home.html b/overrides/home.html new file mode 100644 index 00000000..aa65a4ee --- /dev/null +++ b/overrides/home.html @@ -0,0 +1,655 @@ + +{% extends "main.html" %} +{% block extrahead %} + + {{ super() }} +{% endblock %} +{% block tabs %} +{{ super() }} + + + +
+
+
+
+

Generative AI at the Edge

+

Bring generative AI to the world with NVIDIA® Jetson™

+ + Explore Tutorials + + + Walkthrough + +
+
+ +
+
+
+
+ + +
+ +
+ + + + +
+
+
+ + +
+ + + +
+
+ + + +
+
+ + + +
+ Learn More +
+
+
+ + + + +
+ +
+ + + + + +{% endblock %} +{% block content %} + +{% endblock %} +{% block footer %} + +{% endblock %} diff --git a/overrides/home_navtop.html b/overrides/home_navtop.html new file mode 100644 index 00000000..674b3fc7 --- /dev/null +++ b/overrides/home_navtop.html @@ -0,0 +1,301 @@ + + + + +{% extends "base.html" %} +{% block tabs %} +{{ super() }} + + + + +
+
+
+
+ +
+
+

UP42 Python SDK

+

Access UP42's geospatial collections and processing workflows via Python.

+ + Get started + + + Go to GitHub + +
+
+
+
+ + +
+
+

+ + + + UP42 in Python +

+

Use UP42 via Python: order geospatial data, run analytic workflows, and + generate insights.

+
+
+

+ + + Python ecosystem +

+

Use UP42 together with your preferred Python libraries.

+
+
+

+ + Visualizations +

+

Interactive maps and visualizations. Ideal to use with Jupyter notebooks.

+
+
+ +
+
+
+
+ + + + +{% endblock %} +{% block content %}{% endblock %} +{% block footer %}{% endblock %} \ No newline at end of file diff --git a/overrides/images/JON-with-Gcube.png b/overrides/images/JON-with-Gcube.png new file mode 100644 index 00000000..590f3b1f Binary files /dev/null and b/overrides/images/JON-with-Gcube.png differ diff --git a/overrides/images/JON_Gen-AI-panels.png b/overrides/images/JON_Gen-AI-panels.png new file mode 100644 index 00000000..498dd306 Binary files /dev/null and b/overrides/images/JON_Gen-AI-panels.png differ diff --git a/overrides/images/JON_Gen-AI-panels_1024px.jpg b/overrides/images/JON_Gen-AI-panels_1024px.jpg new file mode 100644 index 00000000..30458936 Binary files /dev/null and b/overrides/images/JON_Gen-AI-panels_1024px.jpg differ diff --git a/overrides/images/JON_Gen-AI-panels_1024px.png b/overrides/images/JON_Gen-AI-panels_1024px.png new file mode 100644 index 00000000..58251f72 Binary files /dev/null and b/overrides/images/JON_Gen-AI-panels_1024px.png differ diff --git a/overrides/images/audio-generation-2560x1440.jpg b/overrides/images/audio-generation-2560x1440.jpg new file mode 100644 index 00000000..be0afe85 Binary files /dev/null and b/overrides/images/audio-generation-2560x1440.jpg differ diff --git a/overrides/images/distillation-2560x1440.jpg b/overrides/images/distillation-2560x1440.jpg new file mode 100644 index 00000000..3b62a41d Binary files /dev/null and b/overrides/images/distillation-2560x1440.jpg differ diff --git a/overrides/images/graph_llm-text-generation.png b/overrides/images/graph_llm-text-generation.png new file mode 100644 index 00000000..41153cc5 Binary files /dev/null and b/overrides/images/graph_llm-text-generation.png differ diff --git a/overrides/images/graph_llm-text-generation.svg b/overrides/images/graph_llm-text-generation.svg new file mode 100644 index 00000000..c453871d --- /dev/null +++ b/overrides/images/graph_llm-text-generation.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/overrides/images/graph_vit-vision-transformers.svg b/overrides/images/graph_vit-vision-transformers.svg new file mode 100644 index 00000000..0daa5681 --- /dev/null +++ b/overrides/images/graph_vit-vision-transformers.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/overrides/images/graph_vlm-text-generation.svg b/overrides/images/graph_vlm-text-generation.svg new file mode 100644 index 00000000..d10efa63 --- /dev/null +++ b/overrides/images/graph_vlm-text-generation.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/overrides/images/icon_NanoDB_512px.png b/overrides/images/icon_NanoDB_512px.png new file mode 100644 index 00000000..505b6d09 Binary files /dev/null and b/overrides/images/icon_NanoDB_512px.png differ diff --git a/overrides/images/icon_NanoSAM.png b/overrides/images/icon_NanoSAM.png new file mode 100644 index 00000000..e9240b8d Binary files /dev/null and b/overrides/images/icon_NanoSAM.png differ diff --git a/overrides/images/icon_NanoSAM_512px.jpg b/overrides/images/icon_NanoSAM_512px.jpg new file mode 100644 index 00000000..19ab6d48 Binary files /dev/null and b/overrides/images/icon_NanoSAM_512px.jpg differ diff --git a/overrides/images/icon_NanoSAM_512px.png b/overrides/images/icon_NanoSAM_512px.png new file mode 100644 index 00000000..335f97fb Binary files /dev/null and b/overrides/images/icon_NanoSAM_512px.png differ diff --git a/overrides/images/icon_diffusion.png b/overrides/images/icon_diffusion.png new file mode 100644 index 00000000..f9cb33e6 Binary files /dev/null and 
b/overrides/images/icon_diffusion.png differ diff --git a/overrides/images/icon_diffusion_512px.jpg b/overrides/images/icon_diffusion_512px.jpg new file mode 100644 index 00000000..07b1d1df Binary files /dev/null and b/overrides/images/icon_diffusion_512px.jpg differ diff --git a/overrides/images/icon_diffusion_512px.png b/overrides/images/icon_diffusion_512px.png new file mode 100644 index 00000000..2c3dc683 Binary files /dev/null and b/overrides/images/icon_diffusion_512px.png differ diff --git a/overrides/images/icon_distillation.png b/overrides/images/icon_distillation.png new file mode 100644 index 00000000..6051b62e Binary files /dev/null and b/overrides/images/icon_distillation.png differ diff --git a/overrides/images/icon_text-generation_512px.jpg b/overrides/images/icon_text-generation_512px.jpg new file mode 100644 index 00000000..934b3c9d Binary files /dev/null and b/overrides/images/icon_text-generation_512px.jpg differ diff --git a/overrides/images/icon_text-generation_512px.png b/overrides/images/icon_text-generation_512px.png new file mode 100644 index 00000000..1435ac81 Binary files /dev/null and b/overrides/images/icon_text-generation_512px.png differ diff --git a/overrides/images/icon_text-generation_iso.png b/overrides/images/icon_text-generation_iso.png new file mode 100644 index 00000000..2458ec87 Binary files /dev/null and b/overrides/images/icon_text-generation_iso.png differ diff --git a/overrides/images/icon_vision-language-model.png b/overrides/images/icon_vision-language-model.png new file mode 100644 index 00000000..23a60e2d Binary files /dev/null and b/overrides/images/icon_vision-language-model.png differ diff --git a/overrides/images/icon_vision-language-model.png:Zone.Identifier b/overrides/images/icon_vision-language-model.png:Zone.Identifier new file mode 100644 index 00000000..bbbaf191 --- /dev/null +++ b/overrides/images/icon_vision-language-model.png:Zone.Identifier @@ -0,0 +1,3 @@ +[ZoneTransfer] +ZoneId=3 +HostUrl=https://www.figma.com/ diff --git a/overrides/images/icon_vision-language-model_512px.jpg b/overrides/images/icon_vision-language-model_512px.jpg new file mode 100644 index 00000000..7eb80d37 Binary files /dev/null and b/overrides/images/icon_vision-language-model_512px.jpg differ diff --git a/overrides/images/icon_vision-language-model_512px.png b/overrides/images/icon_vision-language-model_512px.png new file mode 100644 index 00000000..683e0eb6 Binary files /dev/null and b/overrides/images/icon_vision-language-model_512px.png differ diff --git a/overrides/images/iconnv_audio-generation_512x512.jpg b/overrides/images/iconnv_audio-generation_512x512.jpg new file mode 100644 index 00000000..dc6446b2 Binary files /dev/null and b/overrides/images/iconnv_audio-generation_512x512.jpg differ diff --git a/overrides/images/iconnv_distillation_512x512.jpg b/overrides/images/iconnv_distillation_512x512.jpg new file mode 100644 index 00000000..2ffdcd9a Binary files /dev/null and b/overrides/images/iconnv_distillation_512x512.jpg differ diff --git a/overrides/images/iconnv_image-generation_512x512.jpg b/overrides/images/iconnv_image-generation_512x512.jpg new file mode 100644 index 00000000..c4659307 Binary files /dev/null and b/overrides/images/iconnv_image-generation_512x512.jpg differ diff --git a/overrides/images/iconnv_llamaspeak_512x512.jpg b/overrides/images/iconnv_llamaspeak_512x512.jpg new file mode 100644 index 00000000..81a4fe0d Binary files /dev/null and b/overrides/images/iconnv_llamaspeak_512x512.jpg differ diff --git 
a/overrides/images/iconnv_nanodb_512x512.jpg b/overrides/images/iconnv_nanodb_512x512.jpg new file mode 100644 index 00000000..0e8d1340 Binary files /dev/null and b/overrides/images/iconnv_nanodb_512x512.jpg differ diff --git a/overrides/images/iconnv_nanosam_512x512.jpg b/overrides/images/iconnv_nanosam_512x512.jpg new file mode 100644 index 00000000..2a4e2456 Binary files /dev/null and b/overrides/images/iconnv_nanosam_512x512.jpg differ diff --git a/overrides/images/iconnv_text-generation_512x512.jpg b/overrides/images/iconnv_text-generation_512x512.jpg new file mode 100644 index 00000000..2feebd5c Binary files /dev/null and b/overrides/images/iconnv_text-generation_512x512.jpg differ diff --git a/overrides/images/iconnv_text-vision_512x512.jpg b/overrides/images/iconnv_text-vision_512x512.jpg new file mode 100644 index 00000000..dc04a1f6 Binary files /dev/null and b/overrides/images/iconnv_text-vision_512x512.jpg differ diff --git a/overrides/images/image-generation-2560x1440.jpg b/overrides/images/image-generation-2560x1440.jpg new file mode 100644 index 00000000..2b14d92b Binary files /dev/null and b/overrides/images/image-generation-2560x1440.jpg differ diff --git a/overrides/images/japanese-stable-diffusion.jpg b/overrides/images/japanese-stable-diffusion.jpg new file mode 100644 index 00000000..66fd4fdf Binary files /dev/null and b/overrides/images/japanese-stable-diffusion.jpg differ diff --git a/overrides/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png b/overrides/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png new file mode 100644 index 00000000..a442c0a1 Binary files /dev/null and b/overrides/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png differ diff --git a/overrides/images/llamaspeak-2560x1440.jpg b/overrides/images/llamaspeak-2560x1440.jpg new file mode 100644 index 00000000..dc921b5b Binary files /dev/null and b/overrides/images/llamaspeak-2560x1440.jpg differ diff --git a/overrides/images/nanodb-2560x1440.jpg b/overrides/images/nanodb-2560x1440.jpg new file mode 100644 index 00000000..a03e9e40 Binary files /dev/null and b/overrides/images/nanodb-2560x1440.jpg differ diff --git a/overrides/images/nanosam-2560x1440.jpg b/overrides/images/nanosam-2560x1440.jpg new file mode 100644 index 00000000..355ffd29 Binary files /dev/null and b/overrides/images/nanosam-2560x1440.jpg differ diff --git a/overrides/images/nvicon_llamaspeak_1200x1200.jpg b/overrides/images/nvicon_llamaspeak_1200x1200.jpg new file mode 100644 index 00000000..81a4fe0d Binary files /dev/null and b/overrides/images/nvicon_llamaspeak_1200x1200.jpg differ diff --git a/overrides/images/text-generation-2560x1440.jpg b/overrides/images/text-generation-2560x1440.jpg new file mode 100644 index 00000000..e97a466f Binary files /dev/null and b/overrides/images/text-generation-2560x1440.jpg differ diff --git a/overrides/images/text-vision-2560x1440.jpg b/overrides/images/text-vision-2560x1440.jpg new file mode 100644 index 00000000..45293d14 Binary files /dev/null and b/overrides/images/text-vision-2560x1440.jpg differ diff --git a/overrides/main.html b/overrides/main.html new file mode 100644 index 00000000..dab19aa3 --- /dev/null +++ b/overrides/main.html @@ -0,0 +1,40 @@ + +{% extends "base.html" %} + + +{# +{% block announce %} + +
View the recording of the last Jetson AI Lab Research Group meeting! The next meeting is on 4/17 at 9am PST.
+ +{% endblock %} +#} + +{% block scripts %} + +{{ super() }} + + +{% endblock %} \ No newline at end of file diff --git a/research.html b/research.html new file mode 100644 index 00000000..563e0f8b --- /dev/null +++ b/research.html @@ -0,0 +1,1903 @@ + + + + + + + + + + + + + + + + + + + + + + + Jetson AI Lab Research Group - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + + + + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Jetson AI Lab Research Group

+

The Jetson AI Lab Research Group is a global collective for advancing open-source Edge ML, open to anyone who wants to join, collaborate with others from the community, and build on each other's work. Our goal is to use advanced AI for good in real-world applications, in accessible and responsible ways. By coordinating as a group, we can keep up with the rapid pace of AI and more quickly deploy intelligent multimodal agents and autonomous robots into the field.

+

There are virtual meetings that anyone is welcome to join, offline discussion on the Jetson Projects forum, and guidelines for upstreaming open-source contributions.

+
+

Next Meeting - 5/1

+

The next team meeting is on Wednesday, May 1st at 9am PST. View the recording from the last meeting below.

+
+ + +

Topics of Interest

+

These are some initial research topics for us to discuss and investigate. This list will vary over time as experiments evolve and the SOTA progresses:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
• Controller LLMs for dynamic pipeline code generation• Fine-tuning LLM/VLM onboard Jetson AGX Orin 64GB
• HomeAssistant.io integration for smart home [1] [2]• Continuous multi-image VLM streaming and change detection
• Recurrent LLM architectures (Mamba, RWKV, etc.) [1]• Lightweight low-memory streaming ASR/TTS models
• Diffusion models for image processing and enhancement• Time Series Forecasting with Transformers [1] [2]
• Guidance, grammars, and guardrails for constrained output• Inline LLM function calling / plugins from API definitions
• ML DevOps, edge deployment, and orchestration• Robotics, IoT, and cyberphysical systems integration
+ +

New topics can be raised to the group either during the meetings or on the forums (people are of course welcome to work on whatever they want).

+

Contribution Guidelines

+ + +

When experiments are successful, the results should ideally be packaged so that others can easily reuse and integrate them into their own projects:

+
+ + + + + + +
+ +

Ongoing technical discussions are encouraged to take place on the forums or GitHub Issues, with project status updates given during the meetings.

+

Meeting Schedule

+

We'll aim to meet monthly or bi-weekly as a team in virtual meetings that anyone is welcome to join and speak during. We'll discuss the latest updates and the experiments we want to explore. Please remain courteous to others during the calls. We'll stick around afterwards for anyone who has questions or didn't get the chance to be heard.

+
+

Wednesday, May 1 at 9am PST (5/1/24)

+
    +
  • Teams Invite and Agenda will be posted here.
  • +
+
+ + +

The agenda will be posted beforehand. Post to the forum to add agenda items. The meetings will be recorded so that anyone unable to attend live can watch them afterwards.

+

Past Meetings

+
April 17, 2024 + +
+ +

Topics Covered:

+ + + +
+ +
April 3, 2024 + +
+ +

Active Members

+

Below are some of the sustaining members of the group who have been working on generative AI in edge computing:

+ + +
+
+
+
+ + + + Dustin Franklin, NVIDIA
+ + Principal Engineer | Pittsburgh, PA
+ (jetson-inference, jetson-containers) +
+
+
+
+
+
+ + + Nurgaliyev Shakhizat
+ + Institute of Smart Systems and AI | Kazakhstan
+ (Assistive Devices, Vision2Audio, HPEC Clusters) +
+
+
+
+
+
+ + + Kris Kersey, Kersey Fabrications
+ + Embedded Software Engineer | Atlanta, GA
+ (The OASIS Project, AR/VR, 3D Fabrication) +
+
+
+
+
+
+ + + Johnny Núñez Cano
+ + PhD Researcher in CV/AI | Barcelona, Spain
+ (Recurrent LLMs, Pose & Behavior Analysis) +
+
+
+
+
+
+ + + Doruk Sönmez, Open Zeka
+ + Intelligent Video Analytics Engineer | Turkey
+ (NVIDIA DLI Certified Instructor, IVA, VLM) +
+
+
+
+
+
+ + + + Akash James, Spark Cognition
+ + AI Architect, UC Berkeley Researcher | Oakland
+ (NVIDIA AI Ambassador, Personal Assistants) +
+
+
+
+
+
+ + + Mieszko Syty, MS/1 Design
+ + AI/ML Engineer | Warsaw, Poland
+ (LLM, Home Assistants, ML DevOps) +
+
+
+
+
+
+ + + Jim Benson, JetsonHacks
+ + DIY Extraordinaire | Los Angeles, CA
+ (AI in Education, RACECAR/J) +
+
+
+
+
+
+ + + Chitoku Yato, NVIDIA
+ + Jetson AI DevTech | Santa Clara, CA
+ (JetBot, JetRacer, MinDisk, Containers) +
+
+
+
+
+
+ + Dana Sheahen, NVIDIA
+ + DLI Curriculum Developer | Santa Clara, CA
+ (AI in Education, Jetson AI Fundamentals) +
+
+
+
+
+
+ + + Sammy Ochoa, NVIDIA
+ + Jetson AI DevTech | Austin, TX
+ (Metropolis Microservices) +
+
+
+
+
+
+ + + John Welsh, NVIDIA
+ + (NanoOWL, NanoSAM, JetBot, JetRacer, torch2trt, trt_pose, Knowledge Distillation) + +
+
+
+
+
+ + + + Dave Niewinski
+ + Dave's Armoury | Waterloo, Ontario
+ (GLaDOS, Fetch, Offroad La-Z-Boy, KUKA Bot) +
+
+
+
+
+
+ + + + Gary Hilgemann, REBOTNIX
+ + CEO & AI Roboticist | Lünen, Germany
+ (GUSTAV, SPIKE, VisionTools, GenAI) +
+
+
+
+
+
+ + + + Elaine Wu, Seeed Studio
+ + AI & Robotics Partnerships | Shenzhen, China
+ (reComputer, YOLOv8, LocalJARVIS, Voice Bot) +
+
+
+
+
+
+ + Patty Delafuente, NVIDIA
+ + Data Scientist & UMBC PhD Student | MD
+ (AI in Education, DLI Robotics Teaching Kit) +
+
+
+
+
+
+ + + Song Han, MIT HAN Lab
+ + NVIDIA Research | Cambridge, MA
+ (Efficient Large Models, AWQ, VILA) +
+
+
+
+
+
+ + + Bryan Hughes, Mimzy AI
+ + Founder, Entrepreneur | SF Bay Area
+ (Multimodal Assistants, AI at the Edge) +
+
+
+
+
+
+ + + Tianqi Chen, CMU Catalyst
+ + OctoML, CTO | Seattle, WA
+ (MLC, Apache TVM, XGBoost) +
+
+
+
+
+
+ + + Michael Grüner, RidgeRun
+ + Team Lead / Architect | Costa Rica
+ (Embedded Vision & AI, Multimedia) +
+
+
+
+
+
+ + Jesse Flot, CMU Robotics Academy
+ + Co-Director | Pittsburgh, PA
+ (Applied AI & Robotics, Autonomy Foundations) +
+
+
+
+
+
+ + + + Paul DeCarlo, Microsoft
+ + Professor | University of Houston
+ (Azure IoT, Video Analytics, Microsoft JARVIS) +
+
+
+
+
+
+ + + Mike Hansen, Nabu Casa
+ + Voice Engineering Lead | Urbandale, Iowa
+ (Home Assistant, Piper TTS, Wyoming) +
+
+
+
+
+
+ + + Lakshantha Dissanayake, Ultralytics
+ + Embedded CV Engineer | Vancouver, BC
+ (YOLOv8, TensorRT, DeepStream) +
+
+
+
+
+
+ + + Kerry Shih, OStream
+ + Founder, CEO | Los Angeles, CA
+ (GenRunner, GenAI Nerds) +
+
+
+
+
+
+ + Ziad Youssfi, CMU
+ + ECE Professor | Pittsburgh, PA
+ (ML in Robotics & Embedded Systems) +
+
+
+
+
+
+ + + Walter Lucetti, Stereolabs
+ + Robotics & Vision Engineer | Paris, France
+ (MyzharBot, ROS2, GStreamer) +
+
+
+
+
+
+ + + + Raffaello Bonghi, NVIDIA
+ + AI & Robotics Engineer | Manchester, UK
+ (Nanosaur, Panther, jetson-stats) +
+
+
+
+ + + + + + + + + + +
+
+ + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/research/images/Akash_James.jpg b/research/images/Akash_James.jpg new file mode 100644 index 00000000..b8ccc947 Binary files /dev/null and b/research/images/Akash_James.jpg differ diff --git a/research/images/Bryan_Hughes.jpg b/research/images/Bryan_Hughes.jpg new file mode 100644 index 00000000..c65efefa Binary files /dev/null and b/research/images/Bryan_Hughes.jpg differ diff --git a/research/images/Chitoku_Yato.jpg b/research/images/Chitoku_Yato.jpg new file mode 100644 index 00000000..33d91417 Binary files /dev/null and b/research/images/Chitoku_Yato.jpg differ diff --git a/research/images/Dana_Sheahen.jpg b/research/images/Dana_Sheahen.jpg new file mode 100644 index 00000000..12b6e397 Binary files /dev/null and b/research/images/Dana_Sheahen.jpg differ diff --git a/research/images/Dave_Niewinski.jpg b/research/images/Dave_Niewinski.jpg new file mode 100644 index 00000000..ffa5a7a0 Binary files /dev/null and b/research/images/Dave_Niewinski.jpg differ diff --git a/research/images/Doruk_Sonmez.png b/research/images/Doruk_Sonmez.png new file mode 100644 index 00000000..a1dd08cc Binary files /dev/null and b/research/images/Doruk_Sonmez.png differ diff --git a/research/images/Dustin_Franklin.jpg b/research/images/Dustin_Franklin.jpg new file mode 100644 index 00000000..702e9495 Binary files /dev/null and b/research/images/Dustin_Franklin.jpg differ diff --git a/research/images/Elaine_Wu.jpg b/research/images/Elaine_Wu.jpg new file mode 100644 index 00000000..f7a589b0 Binary files /dev/null and b/research/images/Elaine_Wu.jpg differ diff --git a/research/images/Gary_Hilgemann.jpg b/research/images/Gary_Hilgemann.jpg new file mode 100644 index 00000000..faea2b38 Binary files /dev/null and b/research/images/Gary_Hilgemann.jpg differ diff --git a/research/images/Jesse_Flot.jpg b/research/images/Jesse_Flot.jpg new file mode 100644 index 00000000..8a3b36ac Binary files /dev/null and b/research/images/Jesse_Flot.jpg differ diff --git a/research/images/Jim_Benson.png b/research/images/Jim_Benson.png new file mode 100644 index 00000000..3054da91 Binary files /dev/null and b/research/images/Jim_Benson.png differ diff --git a/research/images/John_Welsh.jpg b/research/images/John_Welsh.jpg new file mode 100644 index 00000000..d7f3cb96 Binary files /dev/null and b/research/images/John_Welsh.jpg differ diff --git a/research/images/Johnny_Cano.jpg b/research/images/Johnny_Cano.jpg new file mode 100644 index 00000000..f59569f7 Binary files /dev/null and b/research/images/Johnny_Cano.jpg differ diff --git a/research/images/Kerry_Shih.jpg b/research/images/Kerry_Shih.jpg new file mode 100644 index 00000000..4019efea Binary files /dev/null and b/research/images/Kerry_Shih.jpg differ diff --git a/research/images/Kris_Kersey.jpg b/research/images/Kris_Kersey.jpg new file mode 100644 index 00000000..252d28df Binary files /dev/null and b/research/images/Kris_Kersey.jpg differ diff --git a/research/images/Lakshantha_Dissanayake.jpg b/research/images/Lakshantha_Dissanayake.jpg new file mode 100644 index 00000000..265f77e9 Binary files /dev/null and b/research/images/Lakshantha_Dissanayake.jpg differ diff --git a/research/images/Michael_Gruner.jpg b/research/images/Michael_Gruner.jpg new file mode 100644 index 00000000..245a3c7f Binary files /dev/null and b/research/images/Michael_Gruner.jpg differ diff --git a/research/images/Mieszko_Syty.jpg b/research/images/Mieszko_Syty.jpg new file mode 100644 index 00000000..6918cfcb Binary files /dev/null 
and b/research/images/Mieszko_Syty.jpg differ diff --git a/research/images/Mike_Hansen.jpg b/research/images/Mike_Hansen.jpg new file mode 100644 index 00000000..aa025b65 Binary files /dev/null and b/research/images/Mike_Hansen.jpg differ diff --git a/research/images/Patty_Delafuente.jpg b/research/images/Patty_Delafuente.jpg new file mode 100644 index 00000000..a7435091 Binary files /dev/null and b/research/images/Patty_Delafuente.jpg differ diff --git a/research/images/Paul_DeCarlo.jpg b/research/images/Paul_DeCarlo.jpg new file mode 100644 index 00000000..00737c3b Binary files /dev/null and b/research/images/Paul_DeCarlo.jpg differ diff --git a/research/images/Raffaello_Bonghi.jpg b/research/images/Raffaello_Bonghi.jpg new file mode 100644 index 00000000..f8600e75 Binary files /dev/null and b/research/images/Raffaello_Bonghi.jpg differ diff --git a/research/images/Sammy_Ochoa.jpg b/research/images/Sammy_Ochoa.jpg new file mode 100644 index 00000000..68218789 Binary files /dev/null and b/research/images/Sammy_Ochoa.jpg differ diff --git a/research/images/Shakhizat_Nurgaliyev.jpg b/research/images/Shakhizat_Nurgaliyev.jpg new file mode 100644 index 00000000..69640de6 Binary files /dev/null and b/research/images/Shakhizat_Nurgaliyev.jpg differ diff --git a/research/images/Song_Han.jpg b/research/images/Song_Han.jpg new file mode 100644 index 00000000..86d61250 Binary files /dev/null and b/research/images/Song_Han.jpg differ diff --git a/research/images/Tianqi_Chen.jpg b/research/images/Tianqi_Chen.jpg new file mode 100644 index 00000000..21cf437b Binary files /dev/null and b/research/images/Tianqi_Chen.jpg differ diff --git a/research/images/Walter_Lucetti.jpg b/research/images/Walter_Lucetti.jpg new file mode 100644 index 00000000..f9dba28f Binary files /dev/null and b/research/images/Walter_Lucetti.jpg differ diff --git a/research/images/Ziad_Youssfi.jpg b/research/images/Ziad_Youssfi.jpg new file mode 100644 index 00000000..555c73c8 Binary files /dev/null and b/research/images/Ziad_Youssfi.jpg differ diff --git a/research/images/github.png b/research/images/github.png new file mode 100644 index 00000000..d6ec7c80 Binary files /dev/null and b/research/images/github.png differ diff --git a/research/images/hackster.png b/research/images/hackster.png new file mode 100644 index 00000000..f0cf8254 Binary files /dev/null and b/research/images/hackster.png differ diff --git a/research/images/linkedin.png b/research/images/linkedin.png new file mode 100644 index 00000000..db3b973f Binary files /dev/null and b/research/images/linkedin.png differ diff --git a/research/images/robots_0.jpg b/research/images/robots_0.jpg new file mode 100644 index 00000000..11225d74 Binary files /dev/null and b/research/images/robots_0.jpg differ diff --git a/research/images/robots_1.jpg b/research/images/robots_1.jpg new file mode 100644 index 00000000..fe2a2825 Binary files /dev/null and b/research/images/robots_1.jpg differ diff --git a/research/images/youtube.png b/research/images/youtube.png new file mode 100644 index 00000000..715ffcd5 Binary files /dev/null and b/research/images/youtube.png differ diff --git a/research/invites/Jetson AI Lab Research Group (41724).ics b/research/invites/Jetson AI Lab Research Group (41724).ics new file mode 100644 index 00000000..3b4a4ef8 --- /dev/null +++ b/research/invites/Jetson AI Lab Research Group (41724).ics @@ -0,0 +1,698 @@ +BEGIN:VCALENDAR +PRODID:-//Microsoft Corporation//Outlook 16.0 MIMEDIR//EN +VERSION:2.0 +METHOD:REQUEST +X-MS-OLK-FORCEINSPECTOROPEN:TRUE 
+BEGIN:VTIMEZONE +TZID:Eastern Standard Time +BEGIN:STANDARD +DTSTART:16011104T020000 +RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11 +TZOFFSETFROM:-0400 +TZOFFSETTO:-0500 +END:STANDARD +BEGIN:DAYLIGHT +DTSTART:16010311T020000 +RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3 +TZOFFSETFROM:-0500 +TZOFFSETTO:-0400 +END:DAYLIGHT +END:VTIMEZONE +BEGIN:VEVENT +CLASS:PUBLIC +CREATED:20240404T032045Z +DESCRIPTION:Agenda will be posted to: https://www.jetson-ai-lab.com/resear + ch.html#meeting-schedule\nTo add topics post to: https://forums.developer + .nvidia.com/t/jetson-ai-lab-research-group-meeting-on-4-17/288491\n_______ + _________________________________________________________________________\ + nMicrosoft Teams Need help? + \nJoin the meeting now \nMeeting ID: 267 767 899 + 278 \nPasscode: nA8Yiw \n________________________________\n\nDial-in by ph + one \n+1 949-570-1120\,\,507782695# Unite + d States\, Irvine \nFind a local number \nPhone conference I + D: 507 782 695# \nJoin on a video conferencing device \nTenant key: teams@ + vc.nvidia.com \nVideo ID: 117 259 101 9 \nMore info \nFor organizers: Meeting options | Reset dial-in PIN \n____________________________________________________ + ____________________________\n \n +DTEND;TZID="Eastern Standard Time":20240417T130000 +DTSTAMP:20240404T032045Z +DTSTART;TZID="Eastern Standard Time":20240417T120000 +LAST-MODIFIED:20240404T032045Z +LOCATION:Microsoft Teams Meeting +ORGANIZER;CN="Dustin Franklin":mailto:dustinf@nvidia.com +PRIORITY:5 +SEQUENCE:0 +SUMMARY;LANGUAGE=en-us:Jetson AI Lab Research Group (4/17/24) +TRANSP:OPAQUE +UID:040000008200E00074C5B7101A82E00800000000607E63361B86DA01000000000000000 + 010000000BB2295284EE52B47943FB644ADF8B55A +X-ALT-DESC;FMTTYPE=text/html:< + style>

Join us for the inaugural team meeting of the Jetson AI Lab Research Group!

On the agenda, we will discuss the latest updates, GTC recap, and experiments that we want to explore.

Anyone is welcome to join and speak. Please remain courteous of others during the call, and stay muted if you aren't speaking. We'll stick around after for anyone who has questions or didn't get the chance to be heard. This meeting will be recorded and posted afterwards for anyone unable to attend.

________________________________

Microsoft Teams | Need help?
Meeting ID: 223 573 467 074
Passcode: 6ybvCg

Dial-in by phone: +1 949-570-1120,,479114164# (United States, Irvine)
Phone conference ID: 479 114 164#

Join on a video conferencing device
Tenant key: teams@vc.nvidia.com
Video ID: 117 895 093 3

For organizers: Meeting options | Reset dial-in PIN

________________________________
+X-MICROSOFT-CDO-BUSYSTATUS:BUSY +X-MICROSOFT-CDO-IMPORTANCE:1 +X-MICROSOFT-DISALLOW-COUNTER:FALSE +X-MS-OLK-AUTOFILLLOCATION:FALSE +X-MS-OLK-CONFTYPE:0 +BEGIN:VALARM +TRIGGER:-PT15M +ACTION:DISPLAY +DESCRIPTION:Reminder +END:VALARM +END:VEVENT +END:VCALENDAR diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..dfea07e7 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"benchmarks.html","title":"Benchmarks","text":""},{"location":"benchmarks.html#large-language-models-llm","title":"Large Language Models (LLM)","text":"

For running LLM benchmarks, see the MLC container documentation.

"},{"location":"benchmarks.html#small-language-models-slm","title":"Small Language Models (SLM)","text":"

Small language models are generally defined as having fewer than 7B parameters (Llama-7B shown for reference). For more data and info about running these models, see the SLM tutorial and MLC container documentation.

"},{"location":"benchmarks.html#vision-language-models-vlm","title":"Vision Language Models (VLM)","text":"

This measures the end-to-end pipeline performance for continuous streaming like with Live Llava. For more data and info about running these models, see the NanoVLM tutorial.

"},{"location":"benchmarks.html#vision-transformers-vit","title":"Vision Transformers (ViT)","text":"

ViT performance data from [1] [2] [3]

"},{"location":"benchmarks.html#stable-diffusion","title":"Stable Diffusion","text":""},{"location":"benchmarks.html#riva","title":"Riva","text":"

For running Riva benchmarks, see ASR Performance and TTS Performance.

"},{"location":"benchmarks.html#vector-database","title":"Vector Database","text":"

For running vector database benchmarks, see the NanoDB container documentation.

"},{"location":"community_articles.html","title":"Community Projects","text":"

Below, you'll find a collection of guides, tutorials, and articles contributed by the community showcasing the implementation of generative AI on the Jetson platform.

"},{"location":"community_articles.html#github-japanese-nmt-translation-for-stable-diffusion-2-23-2024","title":"GitHub Japanese NMT Translation for Stable Diffusion (2-23-2024)","text":"

Toshihiko Aoki has created a prompt generator for stable-diffusion-webui that translates Japanese queries into English using a fine-tuned GPT-2 NMT model before feeding them into Stable Diffusion. Check out the full guide on GitHub under to-aoki/ja-tiny-sd-webui, including the training dataset and LoRA building!

"},{"location":"community_articles.html#github-jetbot-voice-to-action-tools-empowering-your-ros2-robot-with-voice-control-2-17-2024","title":"GitHub JetBot Voice to Action Tools: Empowering Your ROS2 Robot with Voice Control (2-17-2024)","text":"

Jen Hung Ho created ROS2 nodes for ASR/TTS on Jetson Nano that can be used to control JetBot, including customizable voice commands and the execution of advanced actions. Check it out on GitHub under Jen-Hung-Ho/ros2_jetbot_tools and Jen-Hung-Ho/ros2_jetbot_voice and on the forums here.

"},{"location":"community_articles.html#hackster-clearwater-underwater-image-enhancement-with-generative-ai-2-16-2024","title":"Hackster ClearWater: Underwater Image Enhancement with Generative AI (2-16-2024)","text":"

Vy Pham has created a novel denoising pipeline using a custom-trained Transformer-based diffusion model and GAN upscaler for image enhancement, running on Jetson AGX Orin. It runs interactively in a Streamlit web UI for capturing photos and processing images and videos. Great work!

"},{"location":"community_articles.html#hackster-ai-powered-application-for-the-blind-and-visually-impaired-12-13-2023","title":"Hackster AI-Powered Application for the Blind and Visually Impaired (12-13-2023)","text":"

Nurgaliyev Shakhizat demonstrates a locally-hosted Blind Assistant Device running on Jetson AGX Orin 64GB Developer Kit for realtime image-to-speech translation:

\u00a0 Find more resources about this project here: [Hackster] [GitHub]

"},{"location":"community_articles.html#daves-armoury-bringing-glados-to-life-with-robotics-and-ai-2-8-2024","title":"Dave's Armoury Bringing GLaDOS to life with Robotics and AI (2-8-2024)","text":"

See how DIY robotics legend Dave Niewinski from davesarmoury.com brings GLaDOS to life using Jetson AGX Orin, running LLMs onboard alongside object + depth tracking, and RIVA ASR/TTS with a custom-trained voice model for speech recognition and synthesis! Using Unitree Z1 arm with 3D printing and StereoLabs ZED2.

\u00a0 Find more resources about this project here: [Forums] [GitHub]

"},{"location":"community_articles.html#hackster-seeed-studios-local-voice-chatbot-puts-a-speech-recognizing-llama-2-llm-on-your-jetson-2-7-2024","title":"Hackster Seeed Studio's Local Voice Chatbot Puts a Speech-Recognizing LLaMa-2 LLM on Your Jetson (2-7-2024)","text":"

Seeed Studio has announced the launch of the Local Voice Chatbot, an NVIDIA Riva- and LLaMa-2-based large language model (LLM) chatbot with voice recognition capabilities \u2014 running entirely locally on NVIDIA Jetson devices, including the company's own reComputer range. Follow the step-by-step guide on the Seeed Studio wiki.

"},{"location":"community_articles.html#youtube-genai-nerds-react-insider-look-at-nvidias-newest-generative-ai-2-6-2024","title":"YouTube GenAI Nerds React - Insider Look at NVIDIA's Newest Generative AI (2-6-2024)","text":"

Watch this panel about the latest trends & tech in edge AI, featuring Kerry Shih from OStream, Jim Benson from JetsonHacks, and Dusty from NVIDIA.

"},{"location":"community_articles.html#nvidia-bringing-generative-ai-to-life-with-nvidia-jetson-11-7-2023","title":"NVIDIA Bringing Generative AI to Life with NVIDIA Jetson (11-7-2023)","text":"

Watch this webinar about deploying LLMs, VLMs, ViTs, and vector databases onboard Jetson Orin for building next-generation applications using Generative AI:

"},{"location":"community_articles.html#jetsonhacks-jetson-ai-labs-generative-ai-playground-10-31-2023","title":"JetsonHacks Jetson AI Labs \u2013 Generative AI Playground (10-31-2023)","text":"

JetsonHacks publishes an insightful video that walks developers through the typical steps for running generative AI models on Jetson following this site's tutorials. The video shows the interaction with the LLaVA model.

"},{"location":"community_articles.html#hackster-vision2audio-giving-the-blind-an-understanding-through-ai-10-15-2023","title":"Hackster Vision2Audio - Giving the blind an understanding through AI (10-15-2023)","text":"

Nurgaliyev Shakhizat demonstrates Vision2Audio running on Jetson AGX Orin 64GB Developer Kit to harness the power of LLaVA to help visually impaired people:

"},{"location":"community_articles.html#nvidia-generative-ai-models-at-the-edge-10-19-2023","title":"NVIDIA Generative AI Models at the Edge (10-19-2023)","text":"

Follow this walkthrough of the Jetson AI Lab tutorials along with coverage of the latest features and advances coming to JetPack 6 and beyond:

\u00a0 Technical Blog - https://developer.nvidia.com/blog/bringing-generative-ai-to-life-with-jetson/

"},{"location":"community_articles.html#medium-how-to-set-up-your-jetson-device-for-llm-inference-and-fine-tuning-10-02-2023","title":"Medium How to set up your Jetson device for LLM inference and fine-tuning (10-02-2023)","text":"

Michael Yuan's guide demonstrates how to set up the Jetson AGX Orin 64GB Developer Kit specifically for large language model (LLM) inference, highlighting the crucial role of GPUs and the cost-effectiveness of the Jetson AGX Orin for LLM tasks.

https://medium.com/@michaelyuan_88928/how-to-set-up-your-jetson-device-for-llm-inference-and-fine-tuning-682e36444d43

"},{"location":"community_articles.html#hackster-getting-started-with-ai-on-nvidia-jetson-agx-orin-dev-kit-09-16-2023","title":"Hackster Getting Started with AI on Nvidia Jetson AGX Orin Dev Kit (09-16-2023)","text":"

Nurgaliyev Shakhizat demonstrates llamaspeak on Jetson AGX Orin 64GB Developer Kit in this Hackster post:

"},{"location":"community_articles.html#hackster-new-ai-tool-is-generating-a-lot-of-buzz-09-13-2023","title":"Hackster New AI Tool Is Generating a Lot of Buzz (09-13-2023)","text":"

Nick Bild provides an insightful introduction to the Jetson Generative AI Playground:

https://www.hackster.io/news/new-ai-tool-is-generating-a-lot-of-buzz-3cc5f23a3598

"},{"location":"community_articles.html#jetsonhacks-use-these-jetson-docker-containers-tutorial-09-04-2023","title":"JetsonHacks Use These! Jetson Docker Containers Tutorial (09-04-2023)","text":"

JetsonHacks has an in-depth tutorial on how to use jetson-containers and even shows the text-generation-webui and stable-diffusion-webui containers in action!

"},{"location":"community_articles.html#hackster-llama-2-llms-w-nvidia-jetson-and-textgeneration-web-ui-08-17-2023","title":"Hackster LLaMa 2 LLMs w/ NVIDIA Jetson and textgeneration-web-ui (08-17-2023)","text":"

Paul DeCarlo demonstrates 13B and 70B parameter LLama 2 models running locally on Jetson AGX Orin 64GB Developer Kit in this Hackster post:

"},{"location":"community_articles.html#hackster-running-a-chatgpt-like-llm-llama2-on-a-nvidia-jetson-cluster-08-14-2023","title":"Hackster Running a ChatGPT-Like LLM-LLaMA2 on a Nvidia Jetson Cluster (08-14-2023)","text":"

Discover how to run a LLaMA-2 7B model on an NVIDIA Jetson cluster in this insightful tutorial by Nurgaliyev Shakhizat:

"},{"location":"community_articles.html#jetsonhacks-speech-ai-on-nvidia-jetson-tutorial-08-07-2023","title":"JetsonHacks Speech AI on NVIDIA Jetson Tutorial (08-07-2023)","text":"

JetsonHacks gives a nice introduction to the NVIDIA RIVA SDK and demonstrates its automated speech recognition (ASR) capability on the Jetson Orin Nano Developer Kit.

"},{"location":"community_articles.html#hackster-llm-based-multimodal-ai-w-azure-open-ai-nvidia-jetson-07-12-2023","title":"Hackster LLM based Multimodal AI w/ Azure Open AI & NVIDIA Jetson (07-12-2023)","text":"

Learn how to harness the power of Multimodal AI by running Microsoft JARVIS on a Jetson AGX Orin 64GB Developer Kit, enabling a wide range of AI tasks with ChatGPT-like capabilities, image generation, and more, in this comprehensive guide by Paul DeCarlo.

"},{"location":"community_articles.html#hackster-how-to-run-a-chatgpt-like-llm-on-nvidia-jetson-board-06-13-2023","title":"Hackster How to Run a ChatGPT-Like LLM on NVIDIA Jetson board (06-13-2023)","text":"

Nurgaliyev Shakhizat explores a voice AI assistant on Jetson using FastChat and VoskAPI.

"},{"location":"getting-started.html","title":"Getting started","text":""},{"location":"hello_ai_world.html","title":"Hello AI World","text":"

Hello AI World is an in-depth tutorial series for DNN-based inference and training of image classification, object detection, semantic segmentation, and more. It is built on the jetson-inference library using TensorRT for optimized performance on Jetson.

It's highly recommended to familiarize yourself with the concepts of machine learning and computer vision before diving into the more advanced topics of generative AI here on the Jetson AI Lab. Many of these models will prove useful to have during your development.

HELLO AI WORLD >> https://github.com/dusty-nv/jetson-inference
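To give a sense of the Hello AI World workflow before moving on to generative AI, below is a minimal object-detection sketch using the jetson-inference Python bindings (the network name and the camera/display URIs are assumptions; substitute the ones from your own setup):

```python
# Minimal sketch of DNN inference with jetson-inference + jetson-utils
# (assumes a V4L2 camera at /dev/video0 and an attached display).
from jetson_inference import detectNet
from jetson_utils import videoSource, videoOutput

net = detectNet("ssd-mobilenet-v2", threshold=0.5)   # pretrained detection network
camera = videoSource("/dev/video0")                  # also accepts csi://0, rtsp://, or file paths
display = videoOutput("display://0")                 # or webrtc://@:8554/output for headless use

while display.IsStreaming():
    img = camera.Capture()
    if img is None:                                  # capture timeout, keep polling
        continue
    detections = net.Detect(img)                     # runs TensorRT-optimized inference
    display.Render(img)
    display.SetStatus(f"Object Detection | {net.GetNetworkFPS():.0f} FPS")
```

The jetson-inference repo contains the complete, maintained versions of these examples along with the training guides.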

"},{"location":"research.html","title":"Jetson AI Lab Research Group","text":"

The Jetson AI Lab Research Group is a global collective for advancing open-source Edge ML, open to anyone to join and collaborate with others from the community and leverage each other's work. Our goal is to use advanced AI for good in real-world applications, in accessible and responsible ways. By coordinating together as a group, we can keep up with the rapidly evolving pace of AI and more quickly arrive at deploying intelligent multimodal agents and autonomous robots into the field.

There are virtual meetings that anyone is welcome to join, offline discussion on the Jetson Projects forum, and guidelines for upstreaming open-source contributions.

Next Meeting - 5/1

The next team meeting is on Wednesday, May 1st at 9am PST. View the recording from the last meeting below.

"},{"location":"research.html#topics-of-interest","title":"Topics of Interest","text":"

These are some initial research topics for us to discuss and investigate. This list will vary over time as experiments evolve and the SOTA progresses:

\u2022 Controller LLMs for dynamic pipeline code generation \u2022 Fine-tuning LLM/VLM onboard Jetson AGX Orin 64GB \u2022 HomeAssistant.io integration for smart home [1] [2] \u2022 Continuous multi-image VLM streaming and change detection \u2022 Recurrent LLM architectures (Mamba, RWKV, etc.) [1] \u2022 Lightweight low-memory streaming ASR/TTS models \u2022 Diffusion models for image processing and enhancement \u2022 Time Series Forecasting with Transformers [1] [2] \u2022 Guidance, grammars, and guardrails for constrained output \u2022 Inline LLM function calling / plugins from API definitions \u2022 ML DevOps, edge deployment, and orchestration \u2022 Robotics, IoT, and cyberphysical systems integration

New topics can be raised to the group either during the meetings or on the forums (people are welcome to work on whatever they want, of course).

"},{"location":"research.html#contribution-guidelines","title":"Contribution Guidelines","text":"

When experiments are successful, ideally the results will be packaged in such a way that they are easily reusable for others to integrate into their own projects:

  • Open-source libraries & code on GitHub
  • Models on HuggingFace Hub
  • Containers provided by jetson-containers
  • Discussions on the Jetson Projects forum
  • Documentation & tutorials on Jetson AI Lab
  • Hackster.io for hardware-centric builds

Ongoing technical discussions are encouraged to occur on the forums or GitHub Issues, with status updates on projects given during the meetings.

"},{"location":"research.html#meeting-schedule","title":"Meeting Schedule","text":"

We'll aim to meet monthly or bi-weekly as a team in virtual meetings that anyone is welcome to join and speak during. We'll discuss the latest updates and experiments that we want to explore. Please remain courteous to others during the calls. We'll stick around after for anyone who has questions or didn't get the chance to be heard.

Wednesday, May 1 at 9am PST (5/1/24)

  • Teams Invite and Agenda will be posted here.

The agenda will be posted beforehand. Post to the forum to add agenda items. The meetings will be recorded so anyone unable to attend live can watch them after.

"},{"location":"research.html#past-meetings","title":"Past Meetings","text":"April 17, 2017

Topics Covered:

  • Ollama Support for Jetson Devices
  • Home Assistant Integration
  • jetson-container Updates
  • Upgrading JetBot with Orin Nano
April 3, 2024

    Project Kickoffs:

    • Home Assistant Integration
    • Agent Controller LLM
    • ML DevOps, Containers, Core Inferencing
    • "},{"location":"research.html#active-members","title":"Active Members","text":"

Below are some of the sustaining members of the group who have been working on generative AI in edge computing:

      Dustin Franklin, NVIDIA Principal Engineer | Pittsburgh, PA (jetson-inference, jetson-containers) Nurgaliyev Shakhizat Institute of Smart Systems and AI | Kazakhstan (Assistive Devices, Vision2Audio, HPEC Clusters) Kris Kersey, Kersey Fabrications Embedded Software Engineer | Atlanta, GA (The OASIS Project, AR/VR, 3D Fabrication) Johnny N\u00fa\u00f1ez Cano PhD Researcher in CV/AI | Barcelona, Spain (Recurrent LLMs, Pose & Behavior Analysis) Doruk S\u00f6nmez, Open Zeka Intelligent Video Analytics Engineer | Turkey (NVIDIA DLI Certified Instructor, IVA, VLM) Akash James, Spark Cognition AI Architect, UC Berkeley Researcher | Oakland (NVIDIA AI Ambassador, Personal Assistants) Mieszko Syty, MS/1 Design AI/ML Engineer | Warsaw, Poland (LLM, Home Assistants, ML DevOps) Jim Benson, JetsonHacks DIY Extraordinaire | Los Angeles, CA (AI in Education, RACECAR/J) Chitoku Yato, NVIDIA Jetson AI DevTech | Santa Clara, CA (JetBot, JetRacer, MinDisk, Containers) Dana Sheahen, NVIDIA DLI Curriculum Developer | Santa Clara, CA (AI in Education, Jetson AI Fundamentals) Sammy Ochoa, NVIDIA Jetson AI DevTech | Austin, TX (Metropolis Microservices) John Welsh, NVIDIA (NanoOWL, NanoSAM, JetBot, JetRacer, torch2trt, trt_pose, Knowledge Distillation) Dave Niewinski Dave's Armoury | Waterloo, Ontario (GLaDOS, Fetch, Offroad La-Z-Boy, KUKA Bot) Gary Hilgemann, REBOTNIX CEO & AI Roboticist | L\u00fcnen, Germany (GUSTAV, SPIKE, VisionTools, GenAI) Elaine Wu, Seeed Studio AI & Robotics Partnerships | Shenzhen, China (reComputer, YOLOv8, LocalJARVIS, Voice Bot) Patty Delafuente, NVIDIA Data Scientist & UMBC PhD Student | MD (AI in Education, DLI Robotics Teaching Kit) Song Han, MIT HAN Lab NVIDIA Research | Cambridge, MA (Efficient Large Models, AWQ, VILA) Bryan Hughes, Mimzy AI Founder, Entrepreneur | SF Bay Area (Multimodal Assistants, AI at the Edge) Tianqi Chen, CMU Catalyst OctoML, CTO | Seattle, WA (MLC, Apache TVM, XGBoost) Michael Gr\u00fcner, RidgeRun Team Lead / Architect | Costa Rica (Embedded Vision & AI, Multimedia) Jesse Flot, CMU Robotics Academy Co-Director | Pittsburgh, PA (Applied AI & Robotics, Autonomy Foundations) Paul DeCarlo, Microsoft Professor | University of Houston (Azure IoT, Video Analytics, Microsoft JARVIS) Mike Hansen, Nabu Casa Voice Engineering Lead | Urbandale, Iowa (Home Assistant, Piper TTS, Wyoming) Lakshantha Dissanayake, Ultralytics Embedded CV Engineer | Vancouver, BC (YOLOv8, TensorRT, DeepStream) Kerry Shih, OStream Founder, CEO | Los Angeles, CA (GenRunner, GenAI Nerds) Ziad Youssfi, CMU ECE Professor | Pittsburgh, PA (ML in Robotics & Embedded Systems) Walter Lucetti, Stereolabs Robotics & Vision Engineer | Paris, France (MyzharBot, ROS2, GStreamer) Raffaello Bonghi, NVIDIA AI & Robotics Engineer | Manchester, UK (Nanosaur, Panther, jetson-stats)"},{"location":"tips_ram-optimization.html","title":"RAM Optimization","text":"

Running an LLM requires a large amount of RAM.

Especially if you are on a Jetson Orin Nano, which only has 8GB of RAM, it is crucial to leave as much RAM as possible available for models.

      Here we share a couple of ways to optimize the system RAM usage.

      "},{"location":"tips_ram-optimization.html#disabling-the-desktop-gui","title":"Disabling the Desktop GUI","text":"

If you use your Jetson remotely through SSH, you can disable the Ubuntu desktop GUI. This will free up the extra memory that the window manager and desktop use (around 800MB for Unity/GNOME).

      You can disable the desktop temporarily, run commands in the console, and then re-start the desktop when desired:

$ sudo init 3     # stop the desktop\n# log your user back into the console (Ctrl+Alt+F1, F2, etc.)\n$ sudo init 5     # restart the desktop\n

      If you wish to make this persistent across reboots, you can use the following commands to change the boot-up behavior:

      • To disable desktop on boot

        sudo systemctl set-default multi-user.target\n
      • To enable desktop on boot

        sudo systemctl set-default graphical.target\n
      "},{"location":"tips_ram-optimization.html#disabling-misc-services","title":"Disabling misc services","text":"
      sudo systemctl disable nvargus-daemon.service\n
      "},{"location":"tips_ram-optimization.html#mounting-swap","title":"Mounting Swap","text":"

If you're building containers or working with large models, it's advisable to mount swap space (typically sized to match the amount of memory on the board). Run these commands to disable ZRAM and create a swap file:

      If you have NVMe SSD storage available, it's preferred to allocate the swap file on the NVMe SSD.

      sudo systemctl disable nvzramconfig\nsudo fallocate -l 16G /ssd/16GB.swap\nsudo mkswap /ssd/16GB.swap\nsudo swapon /ssd/16GB.swap\n

      Then add the following line to the end of /etc/fstab to make the change persistent:

      /ssd/16GB.swap  none  swap  sw 0  0\n
      "},{"location":"tips_ssd-docker.html","title":"Tips - SSD + Docker","text":"

Once you have your Jetson set up by flashing the latest Jetson Linux (L4T) BSP on it, or by flashing the SD card with the whole JetPack image, and before embarking on testing out all the great generative AI applications using jetson-containers, you want to make sure you have enough storage space for all the containers and models you will download.

We are going to show how you can install an SSD on your Jetson and set it up for Docker.

      "},{"location":"tips_ssd-docker.html#ssd","title":"SSD","text":""},{"location":"tips_ssd-docker.html#physical-installation","title":"Physical installation","text":"
      1. Unplug power and any peripherals from the Jetson developer kit.
      2. Physically install an NVMe SSD card on the carrier board of your Jetson developer kit, making sure to properly seat the connector and secure with the screw.
      3. Reconnect any peripherals, and then reconnect the power supply to turn on the Jetson developer kit.
4. Once the system is up, verify that your Jetson identifies a new memory controller on the PCI bus:

        lspci\n

        The output should look like the following:

        0007:01:00.0 Non-Volatile memory controller: Marvell Technology Group Ltd. Device 1322 (rev 02)\n
      "},{"location":"tips_ssd-docker.html#format-and-set-up-auto-mount","title":"Format and set up auto-mount","text":"
      1. Run lsblk to find the device name.

        lsblk\n

        The output should look like the following:

        NAME         MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT\nloop0          7:0    0    16M  1 loop \nmmcblk1      179:0    0  59.5G  0 disk \n\u251c\u2500mmcblk1p1  179:1    0    58G  0 part /\n\u251c\u2500mmcblk1p2  179:2    0   128M  0 part \n\u251c\u2500mmcblk1p3  179:3    0   768K  0 part \n\u251c\u2500mmcblk1p4  179:4    0  31.6M  0 part \n\u251c\u2500mmcblk1p5  179:5    0   128M  0 part \n\u251c\u2500mmcblk1p6  179:6    0   768K  0 part \n\u251c\u2500mmcblk1p7  179:7    0  31.6M  0 part \n\u251c\u2500mmcblk1p8  179:8    0    80M  0 part \n\u251c\u2500mmcblk1p9  179:9    0   512K  0 part \n\u251c\u2500mmcblk1p10 179:10   0    64M  0 part \n\u251c\u2500mmcblk1p11 179:11   0    80M  0 part \n\u251c\u2500mmcblk1p12 179:12   0   512K  0 part \n\u251c\u2500mmcblk1p13 179:13   0    64M  0 part \n\u2514\u2500mmcblk1p14 179:14   0 879.5M  0 part \nzram0        251:0    0   1.8G  0 disk [SWAP]\nzram1        251:1    0   1.8G  0 disk [SWAP]\nzram2        251:2    0   1.8G  0 disk [SWAP]\nzram3        251:3    0   1.8G  0 disk [SWAP]\nnvme0n1      259:0    0 238.5G  0 disk \n

        Identify the device corresponding to your SSD. In this case, it is nvme0n1.

      2. Format the SSD, create a mount point, and mount it to the filesystem.

        sudo mkfs.ext4 /dev/nvme0n1\n

        You can choose any name for the mount point directory. We use /ssd here, but in jetson-containers' setup.md documentation, /mnt is used.

        sudo mkdir /ssd\n
        sudo mount /dev/nvme0n1 /ssd\n
      3. In order to ensure that the mount persists after boot, add an entry to the fstab file:

        First, identify the UUID for your SSD:

        lsblk -f\n

        Then, add a new entry to the fstab file:

        sudo vi /etc/fstab\n

        Insert the following line, replacing the UUID with the value found from lsblk -f:

        UUID=************-****-****-****-******** /ssd/ ext4 defaults 0 2\n
      4. Finally, change the ownership of the /ssd directory.

        sudo chown ${USER}:${USER} /ssd\n
      "},{"location":"tips_ssd-docker.html#docker","title":"Docker","text":"
      1. Install nvidia-container package.

        Note: If you used an NVIDIA-supplied SD card image to flash your SD card, all necessary JetPack components (including nvidia-containers) and Docker are already pre-installed, so this step can be skipped.

        sudo apt update\nsudo apt install -y nvidia-container\n

        JetPack 6.0 DP users

If you flash Jetson Linux (L4T) R36.2 (JetPack 6.0 DP) on your Jetson using SDK Manager and install nvidia-container using apt, be aware that on JetPack 6.0 it no longer automatically installs Docker.

        Therefore, you need to run the following to manually install Docker and set it up.

        sudo apt update\nsudo apt install -y nvidia-container curl\ncurl https://get.docker.com | sh && sudo systemctl --now enable docker\nsudo nvidia-ctk runtime configure --runtime=docker\n
      2. Restart the Docker service and add your user to the docker group, so that you don't need to use the command with sudo.

        sudo systemctl restart docker\nsudo usermod -aG docker $USER\nnewgrp docker\n
      3. Add default runtime in /etc/docker/daemon.json

        sudo vi /etc/docker/daemon.json\n

Insert the \"default-runtime\": \"nvidia\" line as follows:

        {\n\"runtimes\": {\n\"nvidia\": {\n\"path\": \"nvidia-container-runtime\",\n\"runtimeArgs\": []\n}\n},\n\"default-runtime\": \"nvidia\"\n}\n
      4. Restart Docker

        sudo systemctl daemon-reload && sudo systemctl restart docker\n
      "},{"location":"tips_ssd-docker.html#migrate-docker-directory-to-ssd","title":"Migrate Docker directory to SSD","text":"

      Now that the SSD is installed and available on your device, you can use the extra storage capacity to hold the storage-demanding Docker directory.

      1. Stop the Docker service.

        sudo systemctl stop docker\n
      2. Move the existing Docker folder

        sudo du -csh /var/lib/docker/ && \\\nsudo mkdir /ssd/docker && \\\nsudo rsync -axPS /var/lib/docker/ /ssd/docker/ && \\\nsudo du -csh  /ssd/docker/ 
      3. Edit /etc/docker/daemon.json

        sudo vi /etc/docker/daemon.json\n

Insert the \"data-root\" line as follows:

        {\n\"runtimes\": {\n\"nvidia\": {\n\"path\": \"nvidia-container-runtime\",\n\"runtimeArgs\": []\n}\n},\n\"default-runtime\": \"nvidia\",\n\"data-root\": \"/ssd/docker\"\n}\n
      4. Rename the old Docker data directory

        sudo mv /var/lib/docker /var/lib/docker.old\n
      5. Restart the docker daemon

        sudo systemctl daemon-reload && \\\nsudo systemctl restart docker && \\\nsudo journalctl -u docker\n
      "},{"location":"tips_ssd-docker.html#test-docker-on-ssd","title":"Test Docker on SSD","text":"
      1. [Terminal 1] First, open a terminal to monitor the disk usage while pulling a Docker image.

        watch -n1 df 
      2. [Terminal 2] Next, open a new terminal and start Docker pull.

        docker pull nvcr.io/nvidia/l4t-base:r35.2.1\n
      3. [Terminal 1] Observe that the disk usage on /ssd goes up as the container image is downloaded and extracted.

        ~$ docker image ls\nREPOSITORY                  TAG       IMAGE ID       CREATED        SIZE\nnvcr.io/nvidia/l4t-base     r35.2.1   dc07eb476a1d   7 months ago   713MB\n
      "},{"location":"tips_ssd-docker.html#final-verification","title":"Final Verification","text":"

      Reboot your Jetson, and verify that you observe the following:

      ~$ sudo blkid | grep nvme\n/dev/nvme0n1: UUID=\"9fc06de1-7cf3-43e2-928a-53a9c03fc5d8\" TYPE=\"ext4\"\n\n~$ df -h\nFilesystem      Size  Used Avail Use% Mounted on\n/dev/mmcblk1p1  116G   18G   94G  16% /\nnone            3.5G     0  3.5G   0% /dev\ntmpfs           3.6G  108K  3.6G   1% /dev/shm\ntmpfs           734M   35M  699M   5% /run\ntmpfs           5.0M  4.0K  5.0M   1% /run/lock\ntmpfs           3.6G     0  3.6G   0% /sys/fs/cgroup\ntmpfs           734M   88K  734M   1% /run/user/1000\n/dev/nvme0n1    458G  824M  434G   1% /ssd\n\n~$ docker info | grep Root\n Docker Root Dir: /ssd/docker\n\n~$ sudo ls -l /ssd/docker/\ntotal 44\ndrwx--x--x  4 root root 4096 Mar 22 11:44 buildkit\ndrwx--x---  2 root root 4096 Mar 22 11:44 containers\ndrwx------  3 root root 4096 Mar 22 11:44 image\ndrwxr-x---  3 root root 4096 Mar 22 11:44 network\ndrwx--x--- 13 root root 4096 Mar 22 16:20 overlay2\ndrwx------  4 root root 4096 Mar 22 11:44 plugins\ndrwx------  2 root root 4096 Mar 22 16:19 runtimes\ndrwx------  2 root root 4096 Mar 22 11:44 swarm\ndrwx------  2 root root 4096 Mar 22 16:20 tmp\ndrwx------  2 root root 4096 Mar 22 11:44 trust\ndrwx-----x  2 root root 4096 Mar 22 16:19 volumes\n\n~$ sudo du -chs /ssd/docker/\n752M    /ssd/docker/\n752M    total\n\n~$ docker info | grep -e \"Runtime\" -e \"Root\"\nRuntimes: io.containerd.runtime.v1.linux nvidia runc io.containerd.runc.v2\n Default Runtime: nvidia\n Docker Root Dir: /ssd/docker\n

      Your Jetson is now set up with the SSD!

      "},{"location":"try.html","title":"Try","text":"

      Jump to NVIDIA Jetson Store.

      "},{"location":"tutorial-intro.html","title":"Tutorial - Introduction","text":""},{"location":"tutorial-intro.html#overview","title":"Overview","text":"

      Our tutorials are divided into categories roughly based on model modality, the type of data to be processed or generated.

      "},{"location":"tutorial-intro.html#text-llm","title":"Text (LLM)","text":"text-generation-webui Interact with a local AI assistant by running a LLM with oobabooga's text-generaton-webui llamaspeak Talk live with Llama using Riva ASR/TTS, and chat about images with Llava! Small LLM (SLM) Deploy Small Language Models (SLM) with reduced memory usage and higher throughput. API Examples Learn how to write Python code for doing LLM inference using popular APIs."},{"location":"tutorial-intro.html#text-vision-vlm","title":"Text + Vision (VLM)","text":"

Give your locally running LLM access to vision!

      Mini-GPT4 Mini-GPT4, an open-source model that demonstrate vision-language capabilities. LLaVA Large Language and Vision Assistant, multimodal model that combines a vision encoder and LLM for visual and language understanding. Live LLaVA Run multimodal models interactively on live video streams over a repeating set of prompts. NanoVLM Use mini vision/language models and the optimized multimodal pipeline for live streaming."},{"location":"tutorial-intro.html#image-generation","title":"Image Generation","text":"Stable Diffusion Run AUTOMATIC1111's stable-diffusion-webui to generate images from prompts Stable Diffusion XL A newer ensemble pipeline consisting of a base model and refiner that results in significantly enhanced and detailed image generation capabilities."},{"location":"tutorial-intro.html#vision-transformers-vit","title":"Vision Transformers (ViT)","text":"EfficientVIT MIT Han Lab's EfficientViT, Multi-Scale Linear Attention for High-Resolution Dense Prediction NanoOWL OWL-ViT optimized to run real-time on Jetson with NVIDIA TensorRT NanoSAM NanoSAM, SAM model variant capable of running in real-time on Jetson SAM Meta's SAM, Segment Anything model TAM TAM, Track-Anything model, is an interactive tool for video object tracking and segmentation"},{"location":"tutorial-intro.html#vector-database","title":"Vector Database","text":"NanoDB Interactive demo to witness the impact of Vector Database that handles multimodal data"},{"location":"tutorial-intro.html#audio","title":"Audio","text":"AudioCraft Meta's AudioCraft, to produce high-quality audio and music Whisper OpenAI's Whisper, pre-trained model for automatic speech recognition (ASR)"},{"location":"tutorial-intro.html#metropolis-microservices","title":"Metropolis Microservices","text":"First Steps Get Metropolis Microservices up & running on Jetson with NVStreamer and AI NVR capabilities."},{"location":"tutorial-intro.html#about-nvidia-jetson","title":"About NVIDIA Jetson","text":"

      Note

      We are mainly targeting Jetson Orin generation devices for deploying the latest LLMs and generative AI models.

      Jetson AGX Orin 64GB Developer Kit Jetson AGX Orin Developer Kit Jetson Orin Nano Developer Kit GPU 2048-core NVIDIA Ampere architecture GPU with 64 Tensor Cores 1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores RAM(CPU+GPU) 64GB 32GB 8GB Storage 64GB eMMC (+ NVMe SSD) microSD card (+ NVMe SSD)"},{"location":"tutorial_api-examples.html","title":"Tutorial - API Examples","text":"

      It's good to know the code for generating text with LLM inference, and ancillary things like tokenization, chat templates, and prompting. On this page we give Python examples of running various LLM APIs, and their benchmarks.
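As a quick primer on chat templates, the sketch below just renders a chat prompt to a string without running inference, so you can see exactly what the model receives (the model name is only an example and requires access to the gated Llama-2 weights on HuggingFace; any model that ships a chat template works the same way):

```python
from transformers import AutoTokenizer

# Render the chat template without tokenizing, to inspect the raw prompt text.
tokenizer = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-chat-hf')

messages = [{'role': 'user', 'content': 'Can I get a recipe for French Onion soup?'}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

print(prompt)  # shows the [INST] ... [/INST] wrapping that Llama-2 chat models expect
```

The full Transformers example further down uses the same apply_chat_template() call with return_tensors='pt' to feed the rendered prompt straight into generation.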

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)\u26a0\ufe0f

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35) JetPack 6 (L4T r36)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 22GB for l4t-text-generation container image
        • Space for models (>10GB)
      "},{"location":"tutorial_api-examples.html#transformers","title":"Transformers","text":"

      The HuggingFace Transformers API is the de-facto API that models are released for, often serving as the reference implementation. It's not terribly fast, but it does have broad model support, and also supports quantization (AutoGPTQ, AWQ). This uses streaming:

      from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer\nfrom threading import Thread\n\nmodel_name='meta-llama/Llama-2-7b-chat-hf'\nmodel = AutoModelForCausalLM.from_pretrained(model_name, device_map='cuda')\n\ntokenizer = AutoTokenizer.from_pretrained(model_name)\nstreamer = TextIteratorStreamer(tokenizer)\n\nprompt = [{'role': 'user', 'content': 'Can I get a recipe for French Onion soup?'}]\ninputs = tokenizer.apply_chat_template(\n    prompt,\n    add_generation_prompt=True,\n    return_tensors='pt'\n).to(model.device)\n\nThread(target=lambda: model.generate(inputs, max_new_tokens=256, streamer=streamer)).start()\n\nfor text in streamer:\n    print(text, end='', flush=True)\n

      To run this (it can be found here), you can mount a directory containing the script or your jetson-containers directory:

      jetson-containers run --volume $PWD/packages/llm:/mount --workdir /mount \\\n$(autotag l4t-text-generation) \\\npython3 transformers/test.py\n

We use the l4t-text-generation container because it includes the quantization libraries in addition to Transformers, for running the quantized versions of the models like TheBloke/Llama-2-7B-Chat-GPTQ

      "},{"location":"tutorial_api-examples.html#benchmarks","title":"Benchmarks","text":"

      The huggingface-benchmark.py script will benchmark the models:

      ./run.sh --volume $PWD/packages/llm/transformers:/mount --workdir /mount \\\n$(./autotag l4t-text-generation) \\\npython3 huggingface-benchmark.py --model meta-llama/Llama-2-7b-chat-hf\n
      * meta-llama/Llama-2-7b-chat-hf  AVG = 20.7077 seconds,  6.2 tokens/sec  memory=10173.45 MB\n* TheBloke/Llama-2-7B-Chat-GPTQ  AVG = 12.3922 seconds, 10.3 tokens/sec  memory=7023.36 MB\n* TheBloke/Llama-2-7B-Chat-AWQ   AVG = 11.4667 seconds, 11.2 tokens/sec  memory=4662.34 MB\n
      "},{"location":"tutorial_api-examples.html#nanollm","title":"NanoLLM","text":"

      The NanoLLM library uses the optimized MLC/TVM library for inference, like on the Benchmarks page:

      from nano_llm import NanoLLM, ChatHistory, ChatTemplates\n\n# load model\nmodel = NanoLLM.from_pretrained(\n    model='meta-llama/Llama-2-7b-chat-hf', \n    quantization='q4f16_ft', \n    api='mlc'\n)\n\n# create the chat history\nchat_history = ChatHistory(model, system_prompt=\"You are a helpful and friendly AI assistant.\")\n\nwhile True:\n    # enter the user query from terminal\n    print('>> ', end='', flush=True)\n    prompt = input().strip()\n\n    # add user prompt and generate chat tokens/embeddings\n    chat_history.append(role='user', msg=prompt)\n    embedding, position = chat_history.embed_chat()\n\n    # generate bot reply\n    reply = model.generate(\n        embedding, \n        streaming=True, \n        kv_cache=chat_history.kv_cache,\n        stop_tokens=chat_history.template.stop,\n        max_new_tokens=256,\n    )\n\n    # append the output stream to the chat history\n    bot_reply = chat_history.append(role='bot', text='')\n\n    for token in reply:\n        bot_reply.text += token\n        print(token, end='', flush=True)\n\n    print('\\n')\n\n    # save the inter-request KV cache \n    chat_history.kv_cache = reply.kv_cache\n

      This example keeps an interactive chat running with text being entered from the terminal. You can start it like this:

      jetson-containers run $(autotag nano_llm) \\\n    python3 -m nano_llm.chat.example\n

      Or for easy editing from the host device, copy the source into your own script and mount it into the container with the --volume flag.

      "},{"location":"tutorial_audiocraft.html","title":"Tutorial - AudioCraft","text":"

      Let's run Meta's AudioCraft, to produce high-quality audio and music on Jetson!

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 10.7 GB for audiocraft container image
        • Space for checkpoints
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"tutorial_audiocraft.html#how-to-start","title":"How to start","text":"

Use the run.sh and autotag scripts to automatically pull or build a compatible container image.

      jetson-containers run $(autotag audiocraft)\n

      The container has a default run command (CMD) that will automatically start the Jupyter Lab server.

      Open your browser and access http://<IP_ADDRESS>:8888.

      The default password for Jupyter Lab is nvidia.

      "},{"location":"tutorial_audiocraft.html#run-jupyter-notebooks","title":"Run Jupyter notebooks","text":"

The AudioCraft repo comes with demo Jupyter notebooks.

In the Jupyter Lab navigation pane on the left, double-click the demos folder.

      "},{"location":"tutorial_audiocraft.html#audiogen-demo","title":"AudioGen demo","text":"

      For \"Text-conditional Generation\", you should get something like this.

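If you'd rather script the generation than step through the notebook, here is a minimal sketch of text-conditional generation with AudioCraft (the model name, prompts, and duration are assumptions; the bundled audiogen_demo.ipynb does roughly the same thing interactively):

```python
# Minimal text-conditional audio generation sketch with AudioCraft
# (run inside the audiocraft container; downloads the model on first use).
from audiocraft.models import AudioGen
from audiocraft.data.audio import audio_write

model = AudioGen.get_pretrained('facebook/audiogen-medium')
model.set_generation_params(duration=5)   # seconds of audio per prompt

descriptions = ['dog barking', 'sirens of an emergency vehicle']
wavs = model.generate(descriptions)       # one waveform tensor per prompt

for i, wav in enumerate(wavs):
    # writes audiogen_<i>.wav at the model's sample rate with loudness normalization
    audio_write(f'audiogen_{i}', wav.cpu(), model.sample_rate, strategy="loudness")
```

MusicGen works the same way through MusicGen.get_pretrained() if you want to generate music instead of sound effects.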

      Info

      You may encounter an error message like the following when executing the first cell, but you can keep going.

      A matching Triton is not available, some optimizations will not be enabled.\nError caught was: No module named 'triton'\n

      Warning

When running the 5th cell of audiogen_demo.ipynb, you may run into a \"Failed to load audio\" RuntimeError.

      "},{"location":"tutorial_audiocraft.html#musicgen-demo","title":"MusicGen demo","text":"

      For \"Text-conditional Generation\", you should get something like this.


      Warning

When running the 5th cell of musicgen_demo.ipynb, you may run into a \"Failed to load audio\" RuntimeError.

      "},{"location":"tutorial_distillation.html","title":"CLIP model distillation","text":"

      See \"Jetson Introduction to Knowledge Distillation\" repo's README.md.

      https://github.com/NVIDIA-AI-IOT/jetson-intro-to-distillation

      "},{"location":"tutorial_live-llava.html","title":"Tutorial - Live LLaVA","text":"

      Recommended

      Follow the chat-based LLaVA and NanoVLM tutorials to familiarize yourself with vision/language models and test the models first.

      This multimodal agent runs a vision-language model on a live camera feed or video stream, repeatedly applying the same prompts to it:

It uses models like LLaVA or VILA (based on Llama and CLIP) that have been quantized with 4-bit precision for deployment on Jetson Orin. This runs an optimized multimodal pipeline from the NanoLLM library, including event filters and alerts, and multimodal RAG:

      "},{"location":"tutorial_live-llava.html#running-the-live-llava-demo","title":"Running the Live Llava Demo","text":"

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)\u26a0\ufe0f

      2. Running one of the following versions of JetPack:

        JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 22GB for nano_llm container image
        • Space for models (>10GB)
      4. Follow the chat-based LLaVA and NanoVLM tutorials first.

      5. Supported vision/language models:

        • liuhaotian/llava-v1.5-7b, liuhaotian/llava-v1.5-13b, liuhaotian/llava-v1.6-vicuna-7b, liuhaotian/llava-v1.6-vicuna-13b
        • Efficient-Large-Model/VILA-2.7b,Efficient-Large-Model/VILA-7b, Efficient-Large-Model/VILA-13b
        • NousResearch/Obsidian-3B-V0.5
        • VILA-2.7b, VILA-7b, Llava-7b, and Obsidian-3B can run on Orin Nano 8GB

      The VideoQuery agent applies prompts to the incoming video feed with the VLM. Navigate your browser to https://<IP_ADDRESS>:8050 after launching it with your camera (Chrome is recommended with chrome://flags#enable-webrtc-hide-local-ips-with-mdns disabled)

      jetson-containers run $(autotag nano_llm) \\\npython3 -m nano_llm.agents.video_query --api=mlc \\\n--model Efficient-Large-Model/VILA-2.7b \\\n--max-context-len 768 \\\n--max-new-tokens 32 \\\n--video-input /dev/video0 \\\n--video-output webrtc://@:8554/output\n

      This uses jetson_utils for video I/O, and for options related to protocols and file formats, see Camera Streaming and Multimedia. In the example above, it captures a V4L2 USB webcam connected to the Jetson (under the device /dev/video0) and outputs a WebRTC stream.
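For reference, here is a minimal sketch of the jetson_utils video I/O pattern the agent builds on (the input and output URIs are assumptions; swap in whatever streams your setup uses):

```python
# Minimal jetson_utils capture/render loop (no VLM); the VideoQuery agent
# layers the vision/language model on top of this same pattern.
from jetson_utils import videoSource, videoOutput

camera = videoSource("/dev/video0")              # V4L2 webcam; also csi://, rtsp://, or a file path
output = videoOutput("webrtc://@:8554/output")   # or a file path like /mount/output.mp4

while output.IsStreaming():
    img = camera.Capture()
    if img is None:          # capture timeout, keep polling
        continue
    # ...a VLM would be applied to img here...
    output.Render(img)
```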

      "},{"location":"tutorial_live-llava.html#processing-a-video-file-or-stream","title":"Processing a Video File or Stream","text":"

      The example above was running on a live camera, but you can also read and write a video file or network stream by substituting the path or URL to the --video-input and --video-output command-line arguments like this:

jetson-containers run \\\n-v /path/to/your/videos:/mount \\\n  $(autotag nano_llm) \\\npython3 -m nano_llm.agents.video_query --api=mlc \\\n--model Efficient-Large-Model/VILA-2.7b \\\n--max-new-tokens 32 \\\n--video-input /mount/my_video.mp4 \\\n--video-output /mount/output.mp4 \\\n--prompt \"What does the weather look like?\"\n

This example processes a pre-recorded video (in MP4, MKV, AVI, or FLV format with H.264/H.265 encoding), but it can also input/output live network streams like RTP, RTSP, and WebRTC using Jetson's hardware-accelerated video codecs.

      "},{"location":"tutorial_live-llava.html#nanodb-integration","title":"NanoDB Integration","text":"

      If you launch the VideoQuery agent with the --nanodb flag along with a path to your NanoDB database, it will perform reverse-image search on the incoming feed against the database by re-using the CLIP embeddings generated by the VLM.

      To enable this mode, first follow the NanoDB tutorial to download, index, and test the database. Then launch VideoQuery like this:

      jetson-containers run $(autotag nano_llm) \\\npython3 -m nano_llm.agents.video_query --api=mlc \\\n--model Efficient-Large-Model/VILA-2.7b \\\n--max-context-len 768 \\\n--max-new-tokens 32 \\\n--video-input /dev/video0 \\\n--video-output webrtc://@:8554/output \\\n--nanodb /data/nanodb/coco/2017\n

      You can also tag incoming images and add them to the database using the panel in the web UI.

      "},{"location":"tutorial_llamaspeak.html","title":"Tutorial - llamaspeak","text":"

      Talk live with Llama using ASR/TTS, and chat about images with Llava!

      • The NanoLLM library provides optimized inference for LLM and speech models.
      • It's recommended to run JetPack 6.0 to be able to run the latest containers.

      The WebChat agent has conversational abilities and multimodal support for chatting about images with vision/language models.

      "},{"location":"tutorial_llamaspeak.html#running-llamaspeak","title":"Running llamaspeak","text":"

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB)

      2. Running one of the following versions of JetPack:

        JetPack 6 (L4T r36)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 22GB for nano_llm container image
        • Space for models (>10GB)
      4. Start the Riva server first and test the ASR/TTS services.

      jetson-containers run --env HUGGINGFACE_TOKEN=hf_xyz123abc456 \\\n$(autotag nano_llm) \\\npython3 -m nano_llm.agents.web_chat --api=mlc \\\n--model meta-llama/Llama-2-7b-chat-hf \\\n--asr=riva --tts=riva\n

      This will start llamaspeak with text LLM and Riva for ASR/TTS. You can then navigate your browser to https://IP_ADDRESS:8050

      • The default port is 8050, but can be changed with --web-port (and --ws-port for the websocket port)
      • To debug issues with client/server communication, use --verbose and --web-trace to print incoming/outgoing websocket messages.
      • During bot replies, the TTS model will pause output if you speak a few words in the mic to interrupt it.

      "},{"location":"tutorial_llamaspeak.html#multimodality","title":"Multimodality","text":"

      If you load a multimodal vision/language model instead, you can drag images into the chat and ask questions about them:

      jetson-containers run $(autotag nano_llm) \\\npython3 -m nano_llm.agents.web_chat --api=mlc \\\n--model Efficient-Large-Model/VILA-7b \\\n--asr=riva --tts=riva\n

      For more info about the supported vision/language models, see the NanoVLM page.

      "},{"location":"tutorial_llava.html","title":"Tutorial - LLaVA","text":"

LLaVA is a popular multimodal vision/language model that you can run locally on Jetson to answer questions about image prompts and queries. Llava uses the CLIP vision encoder to transform images into the same embedding space as its LLM (which uses the same architecture as Llama). Below we cover different methods to run Llava on Jetson, with increasingly optimized performance:

      1. Chat with Llava using text-generation-webui
      2. Run from the terminal with llava.serve.cli
      3. Quantized GGUF models with llama.cpp
      4. Optimized Multimodal Pipeline with NanoVLM
      Llava-13B (Jetson AGX Orin) Quantization Tokens/sec Memory text-generation-webui 4-bit (GPTQ) 2.3 9.7 GB llava.serve.cli FP16 (None) 4.2 27.7 GB llama.cpp 4-bit (Q4_K) 10.1 9.2 GB NanoVLM 4-bit (MLC) 21.1 8.7 GB

      In addition to Llava, the NanoVLM pipeline supports VILA and mini vision models that run on Orin Nano as well.

      "},{"location":"tutorial_llava.html#1-chat-with-llava-using-text-generation-webui","title":"1. Chat with Llava using text-generation-webui","text":"

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.2GB for text-generation-webui container image
        • Space for models
          • CLIP model : 1.7GB
          • Llava-v1.5-13B-GPTQ model : 7.25GB
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"tutorial_llava.html#download-model","title":"Download Model","text":"
      jetson-containers run --workdir=/opt/text-generation-webui $(autotag text-generation-webui) \\\n  python3 download-model.py --output=/data/models/text-generation-webui \\\n    TheBloke/llava-v1.5-13B-GPTQ\n
      "},{"location":"tutorial_llava.html#start-web-ui-with-multimodal-extension","title":"Start Web UI with Multimodal Extension","text":"
      jetson-containers run --workdir=/opt/text-generation-webui $(autotag text-generation-webui) \\\n  python3 server.py --listen \\\n    --model-dir /data/models/text-generation-webui \\\n    --model TheBloke_llava-v1.5-13B-GPTQ \\\n    --multimodal-pipeline llava-v1.5-13b \\\n    --loader autogptq \\\n    --disable_exllama \\\n    --verbose\n

Go to the Chat tab, drag and drop an image into the Drop Image Here area, type your question in the text area, and hit Generate:

      "},{"location":"tutorial_llava.html#result","title":"Result","text":""},{"location":"tutorial_llava.html#2-run-from-the-terminal-with-llavaservecli","title":"2. Run from the terminal with llava.serve.cli","text":"

      What you need

      1. One of the following Jetson:

        Jetson AGX Orin 64GB Jetson AGX Orin (32GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.1GB for llava container
        • 14GB for Llava-7B (or 26GB for Llava-13B)

      This example uses the upstream Llava repo to run the original, unquantized Llava models from the command-line. It uses more memory due to using FP16 precision, and is provided mostly as a reference for debugging. See the Llava container readme for more info.

      "},{"location":"tutorial_llava.html#llava-v15-7b","title":"llava-v1.5-7b","text":"
      jetson-containers run $(autotag llava) \\\n  python3 -m llava.serve.cli \\\n    --model-path liuhaotian/llava-v1.5-7b \\\n    --image-file /data/images/hoover.jpg\n
      "},{"location":"tutorial_llava.html#llava-v15-13b","title":"llava-v1.5-13b","text":"
      jetson-containers run $(autotag llava) \\\npython3 -m llava.serve.cli \\\n--model-path liuhaotian/llava-v1.5-13b \\\n--image-file /data/images/hoover.jpg\n

The unquantized 13B model may only run on Jetson AGX Orin 64GB due to its memory requirements.

      "},{"location":"tutorial_llava.html#3-quantized-gguf-models-with-llamacpp","title":"3. Quantized GGUF models with llama.cpp","text":"

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

llama.cpp is one of the faster LLM APIs, and it can apply a variety of quantization methods to Llava to reduce its memory usage and runtime. Despite its name, it uses CUDA. There are pre-quantized versions of Llava-1.5 available in GGUF format for 4-bit and 5-bit:

      • mys/ggml_llava-v1.5-7b
      • mys/ggml_llava-v1.5-13b
      jetson-containers run --workdir=/opt/llama.cpp/bin $(autotag llama_cpp:gguf) \\\n/bin/bash -c './llava-cli \\\n    --model $(huggingface-downloader mys/ggml_llava-v1.5-13b/ggml-model-q4_k.gguf) \\\n    --mmproj $(huggingface-downloader mys/ggml_llava-v1.5-13b/mmproj-model-f16.gguf) \\\n    --n-gpu-layers 999 \\\n    --image /data/images/hoover.jpg \\\n    --prompt \"What does the sign say\"'\n
Quantization   Bits   Response                                Tokens/sec   Memory
Q4_K           4      The sign says \"Hoover Dam, Exit 9.\"   10.17        9.2 GB
Q5_K           5      The sign says \"Hoover Dam exit 9.\"    9.73         10.4 GB

      A lower temperature like 0.1 is recommended for better quality (--temp 0.1), and if you omit --prompt it will describe the image:

      jetson-containers run --workdir=/opt/llama.cpp/bin $(autotag llama_cpp:gguf) \\\n/bin/bash -c './llava-cli \\\n    --model $(huggingface-downloader mys/ggml_llava-v1.5-13b/ggml-model-q4_k.gguf) \\\n    --mmproj $(huggingface-downloader mys/ggml_llava-v1.5-13b/mmproj-model-f16.gguf) \\\n    --n-gpu-layers 999 \\\n    --image /data/images/lake.jpg'\n\nIn this image, a small wooden pier extends out into a calm lake, surrounded by tall trees and mountains. The pier seems to be the only access point to the lake. The serene scene includes a few boats scattered across the water, with one near the pier and the others further away. The overall atmosphere suggests a peaceful and tranquil setting, perfect for relaxation and enjoying nature.\n

You can put your own images in the mounted jetson-containers/data directory. The C++ code for llava-cli can be found here. The llama-cpp-python bindings also support Llava, although they are significantly slower from Python for some reason (potentially the image pre-processing).
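If you do want to try the Python bindings, below is a rough sketch of what Llava usage looks like with llama-cpp-python's multimodal chat handler. This is not part of the container setup above, the file paths are placeholders for the GGUF files linked earlier, and the exact class names and arguments may vary between llama-cpp-python versions, so treat it as an illustration rather than a reference:

# sketch using llama-cpp-python's Llava support (API details may differ by version)
from llama_cpp import Llama
from llama_cpp.llama_chat_format import Llava15ChatHandler

# paths assume the mys/ggml_llava-v1.5-13b files have already been downloaded
chat_handler = Llava15ChatHandler(clip_model_path="mmproj-model-f16.gguf")

llm = Llama(
    model_path="ggml-model-q4_k.gguf",
    chat_handler=chat_handler,
    n_ctx=2048,          # leave room for the image embeddings plus the prompt
    n_gpu_layers=999,    # offload all layers to the GPU
)

out = llm.create_chat_completion(messages=[{
    "role": "user",
    "content": [
        {"type": "image_url", "image_url": {"url": "file:///data/images/hoover.jpg"}},
        {"type": "text", "text": "What does the sign say?"},
    ],
}])

print(out["choices"][0]["message"]["content"])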

      "},{"location":"tutorial_llava.html#4-optimized-multimodal-pipeline-with-nanovlm","title":"4. Optimized Multimodal Pipeline with NanoVLM","text":"

      What's Next

      This section got too long and was moved to the NanoVLM page - check it out there for performance optimizations, mini VLMs, and live streaming!

      "},{"location":"tutorial_minigpt4.html","title":"Tutorial - MiniGPT-4","text":"

Give your locally running LLM access to vision by running MiniGPT-4 on Jetson!

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 5.8GB for container image
        • Space for pre-quantized MiniGPT-4 model
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"tutorial_minigpt4.html#start-minigpt4-container-with-models","title":"Start minigpt4 container with models","text":"

      To start the MiniGPT4 container and webserver with the recommended models, run this command:

      jetson-containers run $(autotag minigpt4) /bin/bash -c 'cd /opt/minigpt4.cpp/minigpt4 && python3 webui.py \\\n  $(huggingface-downloader --type=dataset maknee/minigpt4-13b-ggml/minigpt4-13B-f16.bin) \\\n  $(huggingface-downloader --type=dataset maknee/ggml-vicuna-v0-quantized/ggml-vicuna-13B-v0-q5_k.bin)'\n

      Then, open your web browser and access http://<IP_ADDRESS>:7860.

      "},{"location":"tutorial_minigpt4.html#results","title":"Results","text":""},{"location":"tutorial_mmj.html","title":"First Steps","text":"

First steps with Metropolis Microservices for Jetson

      NVIDIA Metropolis Microservices for Jetson simplifies the development of vision AI applications, offering a suite of customizable, cloud-native tools. Before diving into this tutorial, ensure you've filled out the Metropolis Microservices for Jetson Early Access form to gain the necessary access to launch the services. This step is crucial as it enables you to utilize all the features and capabilities discussed in this guide.

Perfect for both newcomers and experts, this tutorial provides straightforward steps to kick-start your edge AI projects. Whether you're a student or an ecosystem partner working on a use case, this guide offers a solid start for every skill level.

      "},{"location":"tutorial_mmj.html#0-install-nvidia-jetson-services","title":"0. Install NVIDIA Jetson Services:","text":"

      Ok, let's start by installing NVIDIA Jetson Services:

      sudo apt install nvidia-jetson-services\n

      Let's add some performance hacks that will be needed to run the demo faster and without streaming artifacts:

• If your Orin is not already running at maximum performance, you can use these two commands (a reboot is needed afterwards):

        sudo nvpmodel -m 0 \nsudo jetson_clocks\n

• The next settings are optional, but they fine-tune your network buffers to ensure smoother streaming by optimizing how much data can be sent and received:

        sudo sysctl -w net.core.rmem_default=2129920\nsudo sysctl -w net.core.rmem_max=10000000\nsudo sysctl -w net.core.wmem_max=2000000\n

      "},{"location":"tutorial_mmj.html#1-download-nvidia-cli-for-jetson","title":"1. Download NVIDIA CLI for Jetson","text":"

Download the NGC CLI for ARM64 from the NGC CLI site:

      unzip ngccli_arm64.zip\nchmod u+x ngc-cli/ngc\necho \"export PATH=\\\"\\$PATH:$(pwd)/ngc-cli\\\"\" >> ~/.bash_profile && source ~/.bash_profile\nngc config set\n
Here it will ask for your API key and the organization name. To get those, you need to log in to NGC and generate an API key here.

You should then paste the API key and enter your organization name. You can also press [Enter] to select the default values for the remaining options. After this, you should get the message:

      Successfully saved NGC configuration to /home/jetson/.ngc/config\n

      Then, login with the same API key:

      sudo docker login nvcr.io -u \"\\$oauthtoken\" -p <NGC-API-KEY>\n

      Now launch the Redis and Ingress services, as we need them for this tutorial.

      sudo systemctl start jetson-redis\nsudo systemctl start jetson-ingress\n
      "},{"location":"tutorial_mmj.html#2-download-and-launch-nvstreamer","title":"2. Download and launch NVStreamer","text":""},{"location":"tutorial_mmj.html#_1","title":"First Steps","text":"

First, we need to install NVStreamer, an app that serves the videos that Metropolis Microservices will run AI on. Follow this NVStreamer Link (in the top-left, click Download files.zip):

      unzip files.zip\nrm files.zip\ntar -xvf nvstreamer.tar.gz\ncd nvstreamer\n
      Launch it:
      sudo docker compose -f compose_nvstreamer.yaml up -d  --force-recreate\n

      "},{"location":"tutorial_mmj.html#3-download-ai_nvr-and-launch","title":"3. Download AI_NVR and launch:","text":""},{"location":"tutorial_mmj.html#_2","title":"First Steps","text":"

      AI NVR (NGC) Link (Top-left -> Download files.zip)

      unzip files.zip\nrm files.zip\ntar -xvf ai_nvr.tar.gz\nsudo cp ai_nvr/config/ai-nvr-nginx.conf /opt/nvidia/jetson/services/ingress/config/\ncd ai_nvr\nsudo docker compose -f compose_agx.yaml up -d --force-recreate\n
      "},{"location":"tutorial_mmj.html#4-download-some-sample-videos-and-upload-them-to-nvstreamer","title":"4. Download some sample videos and upload them to NVStreamer","text":"

      Download them from here.

      unzip files.zip\n
Ok, now, this is important to understand: there are 2 web interfaces:

1. The NVStreamer Dashboard, running at: http://localhost:31000
2. The Camera Management Dashboard, running at: http://localhost:30080/vst

So, first we need to upload the file in the Streamer interface. It looks like this:

There, go to File Upload, and drag and drop the file into the square upload area.

After uploading it, go to the Dashboard option of the left menu and copy the RTSP URL of the video you just uploaded; you will need it for the Camera Management Dashboard.

      Now jump to the Camera Management Dashboard (http://localhost:30080/vst), it looks like this:

Go to the Camera Management option of the menu, then use the Add device manually option and paste the RTSP URL. Add the name of your video to the Name and Location text boxes, so it will be displayed on top of the stream.

Finally, click on the Live Streams option of the left menu, and you should be able to watch your video stream.

      "},{"location":"tutorial_mmj.html#5-watch-rtsp-ai-processed-streaming-from-vlc","title":"5. Watch RTSP AI processed streaming from VLC","text":"

Open VLC from another computer (localhost doesn't work here), and point it to your Jetson Orin's IP address (you should be on the same network and have no firewall blocking access).

The easiest way to get the Jetson's IP is by running:

      ifconfig\n
and checking the inet IP of the interface (usually wlan0).

      Then go to rtsp://[JETSON_IP]:8555/ds-test using VLC like this:

      "},{"location":"tutorial_mmj.html#6-android-app","title":"6. Android app","text":"

There is an Android app that allows you to track events and create areas of interest to monitor. You can find it on Google Play as AI NVR.

Here is a quick walkthrough where you can see how to:

      • Add the IP address of the Jetson
      • Track current events
      • Add new areas of interest
• Add a tripwire to track the flow and direction of events

      "},{"location":"tutorial_nano-llm.html","title":"NanoLLM - Optimized LLM Inference","text":"

NanoLLM is a lightweight, high-performance library using optimized inferencing APIs for quantized LLMs, multimodality, speech services, vector databases with RAG, and web frontends. It's used to build many of the responsive, low-latency agents featured on this site.

      It provides similar APIs to HuggingFace, backed by highly-optimized inference libraries and quantization tools:

      from nano_llm import NanoLLM\n\nmodel = NanoLLM.from_pretrained(\n   \"meta-llama/Llama-2-7b-hf\",  # HuggingFace repo/model name, or path to HF model checkpoint\n   api='mlc',                   # supported APIs are: mlc, awq, hf\n   api_token='hf_abc123def',    # HuggingFace API key for authenticated models ($HUGGINGFACE_TOKEN)\n   quantization='q4f16_ft'      # q4f16_ft, q4f16_1, q8f16_0 for MLC, or path to AWQ weights\n)\n\nresponse = model.generate(\"Once upon a time,\", max_new_tokens=128)\n\nfor token in response:\n   print(token, end='', flush=True)\n
      "},{"location":"tutorial_nano-llm.html#resources","title":"Resources","text":"

      Here's an index of the various tutorials & examples using NanoLLM on Jetson AI Lab:

Benchmarks        Benchmarking results for LLM, SLM, VLM using MLC/TVM backend
API Examples      Python code examples for completion and multi-turn chat
Llamaspeak        Talk verbally with LLMs using low-latency ASR/TTS speech models
Small LLM (SLM)   Focus on language models with reduced footprint (7B params and below)
Live LLaVA        Realtime live-streaming vision/language models on recurring prompts
Nano VLM          Efficient multimodal pipeline with one-shot RAG support
"},{"location":"tutorial_nano-vlm.html","title":"NanoVLM - Efficient Multimodal Pipeline","text":"

We saw in the previous LLaVA tutorial how to run vision-language models through tools like text-generation-webui and llama.cpp. In a similar vein to the SLM page on Small Language Models, here we'll explore optimizing VLMs for reduced memory usage and higher performance that reaches interactive levels (like in Live Llava). These are great for fitting on Orin Nano and increasing the framerate.

      There are 3 model families currently supported: Llava, VILA, and Obsidian (mini VLM)

      "},{"location":"tutorial_nano-vlm.html#vlm-benchmarks","title":"VLM Benchmarks","text":"

This FPS measures the end-to-end pipeline performance for continuous streaming like with Live Llava (on a yes/no question).

• These models all use CLIP ViT-L/14@336px for the vision encoder.
• Jetson Orin Nano 8GB runs out of memory trying to run Llava-13B.

      "},{"location":"tutorial_nano-vlm.html#multimodal-chat","title":"Multimodal Chat","text":"

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)\u26a0\ufe0f

      2. Running one of the following versions of JetPack:

        JetPack 6 (L4T r36)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 22GB for nano_llm container image
        • Space for models (>10GB)
      4. Supported VLM models in NanoLLM:

        • liuhaotian/llava-v1.5-7b, liuhaotian/llava-v1.5-13b, liuhaotian/llava-v1.6-vicuna-7b, liuhaotian/llava-v1.6-vicuna-13b
• Efficient-Large-Model/VILA-2.7b, Efficient-Large-Model/VILA-7b, Efficient-Large-Model/VILA-13b
        • NousResearch/Obsidian-3B-V0.5
        • VILA-2.7b, VILA-7b, Llava-7b, and Obsidian-3B can run on Orin Nano 8GB

The optimized NanoLLM library uses MLC/TVM for quantization and inference, which provides the highest performance. It efficiently manages the CLIP embeddings and KV cache. You can find the Python code for the chat program used in this example here.

      jetson-containers run $(autotag nano_llm) \\\npython3 -m nano_llm.chat --api=mlc \\\n--model liuhaotian/llava-v1.6-vicuna-7b \\\n--max-context-len 768 \\\n--max-new-tokens 128\n

      This starts an interactive console-based chat with Llava, and on the first run the model will automatically be downloaded from HuggingFace and quantized using MLC and W4A16 precision (which can take some time). See here for command-line options.

You'll end up at a >> PROMPT: in which you can enter the path or URL of an image file, followed by your question about the image. You can follow up with multiple questions about the same image. Llava does not understand multiple images in the same chat, so when changing images, first reset the chat history by entering clear or reset as the prompt. VILA supports multiple images (an area of active research).

      "},{"location":"tutorial_nano-vlm.html#automated-prompts","title":"Automated Prompts","text":"

      During testing, you can specify prompts on the command-line that will run sequentially:

      jetson-containers run $(autotag nano_llm) \\\n  python3 -m nano_llm.chat --api=mlc \\\n    --model liuhaotian/llava-v1.6-vicuna-7b \\\n    --max-context-len 768 \\\n    --max-new-tokens 128 \\\n    --prompt '/data/images/hoover.jpg' \\\n    --prompt 'what does the road sign say?' \\\n    --prompt 'what kind of environment is it?' \\\n    --prompt 'reset' \\\n    --prompt '/data/images/lake.jpg' \\\n    --prompt 'please describe the scene.' \\\n    --prompt 'are there any hazards to be aware of?'\n

      You can also use --prompt /data/prompts/images.json to run the test sequence, the results of which are in the table below.

      "},{"location":"tutorial_nano-vlm.html#results","title":"Results","text":"

• The model responses are with 4-bit quantization enabled, and are truncated to 128 tokens for brevity.
• These chat questions and images are from /data/prompts/images.json (found in jetson-containers).

      "},{"location":"tutorial_nano-vlm.html#json","title":"JSON","text":"

When prompted, these models can also output in constrained JSON formats (which the LLaVA authors cover in their LLaVA-1.5 paper), and can be used to programmatically query information about the image:

      jetson-containers run $(autotag nano_llm) \\\n  python3 -m nano_llm.chat --api=mlc \\\n    --model liuhaotian/llava-v1.5-13b \\\n    --prompt '/data/images/hoover.jpg' \\\n    --prompt 'extract any text from the image as json'\n\n{\n  \"sign\": \"Hoover Dam\",\n  \"exit\": \"2\",\n  \"distance\": \"1 1/2 mile\"\n}\n
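Since the reply is constrained JSON like the output above, it can be consumed directly from a script. A minimal sketch (only the reply text is taken from the example above; the parsing is plain Python standard library):

import json

reply = '{"sign": "Hoover Dam", "exit": "2", "distance": "1 1/2 mile"}'

info = json.loads(reply)
print(f"{info['sign']} - exit {info['exit']} in {info['distance']}")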
      "},{"location":"tutorial_nano-vlm.html#web-ui","title":"Web UI","text":"

      To use this through a web browser instead, see the llamaspeak tutorial:

      "},{"location":"tutorial_nano-vlm.html#live-streaming","title":"Live Streaming","text":"

      These models can also be used with the Live Llava agent for continuous streaming - just substitute the desired model name below:

      jetson-containers run $(autotag nano_llm) \\\npython3 -m nano_llm.agents.video_query --api=mlc \\\n--model Efficient-Large-Model/VILA-2.7b \\\n--max-context-len 768 \\\n--max-new-tokens 32 \\\n--video-input /dev/video0 \\\n--video-output webrtc://@:8554/output\n

      The Live Llava tutorial shows how to enable additional features like vector database integration, image tagging, and RAG.

      "},{"location":"tutorial_nanodb.html","title":"Tutorial - NanoDB","text":"

Let's run NanoDB's interactive demo to see the impact of a vector database that handles multimodal data.

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 16GB for container image
        • 40GB for MS COCO dataset
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"tutorial_nanodb.html#how-to-start","title":"How to start","text":""},{"location":"tutorial_nanodb.html#download-coco","title":"Download COCO","text":"

As an example, let's use the MS COCO dataset:

      cd jetson-containers\nmkdir -p data/datasets/coco/2017\ncd data/datasets/coco/2017\n\nwget http://images.cocodataset.org/zips/train2017.zip\nwget http://images.cocodataset.org/zips/val2017.zip\nwget http://images.cocodataset.org/zips/unlabeled2017.zip\n\nunzip train2017.zip\nunzip val2017.zip\nunzip unlabeled2017.zip\n
      "},{"location":"tutorial_nanodb.html#download-index","title":"Download Index","text":"

      You can download a pre-indexed NanoDB that was already prepared over the COCO dataset from here:

      cd jetson-containers/data\nwget https://nvidia.box.com/shared/static/icw8qhgioyj4qsk832r4nj2p9olsxoci.gz -O nanodb_coco_2017.tar.gz\ntar -xzvf nanodb_coco_2017.tar.gz\n

This allows you to skip the indexing process in the next step, and jump to starting the Web UI.

      "},{"location":"tutorial_nanodb.html#indexing-data","title":"Indexing Data","text":"

If you didn't download the NanoDB index for COCO above, you'll need to build the index by scanning your dataset directory:

      jetson-containers run $(autotag nanodb) \\\n  python3 -m nanodb \\\n    --scan /data/datasets/coco/2017 \\\n    --path /data/nanodb/coco/2017 \\\n    --autosave --validate \n

This will take a few hours on AGX Orin. Once the database has loaded and completed any start-up operations, it will drop down to a > prompt from which the user can run search queries. You can quickly check the operation by typing your query at this prompt:

      > a girl riding a horse\n\n* index=80110   /data/datasets/coco/2017/train2017/000000393735.jpg      similarity=0.29991915822029114\n* index=158747  /data/datasets/coco/2017/unlabeled2017/000000189708.jpg  similarity=0.29254037141799927\n* index=123846  /data/datasets/coco/2017/unlabeled2017/000000026239.jpg  similarity=0.292171448469162\n* index=127338  /data/datasets/coco/2017/unlabeled2017/000000042508.jpg  similarity=0.29118549823760986\n* index=77416   /data/datasets/coco/2017/train2017/000000380634.jpg      similarity=0.28964102268218994\n* index=51992   /data/datasets/coco/2017/train2017/000000256290.jpg      similarity=0.28929752111434937\n* index=228640  /data/datasets/coco/2017/unlabeled2017/000000520381.jpg  similarity=0.28642547130584717\n* index=104819  /data/datasets/coco/2017/train2017/000000515895.jpg      similarity=0.285491943359375\n

      You can press Ctrl+C to exit. For more info about the various options available, see the NanoDB container documentation.
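For reference, the similarity scores printed above are cosine similarities between the CLIP embedding of the text query and the CLIP embeddings of the indexed images (NanoDB's own implementation is optimized and may differ in detail). A minimal sketch of that computation with NumPy, using random arrays in place of real CLIP embeddings:

import numpy as np

def cosine_similarity(query_emb, image_embs):
    # compare one query embedding (D,) against N image embeddings (N, D)
    q = query_emb / np.linalg.norm(query_emb)
    im = image_embs / np.linalg.norm(image_embs, axis=1, keepdims=True)
    return im @ q

query = np.random.rand(768).astype(np.float32)          # stand-in for a CLIP text embedding
images = np.random.rand(1000, 768).astype(np.float32)   # stand-ins for CLIP image embeddings

scores = cosine_similarity(query, images)
top_k = np.argsort(scores)[::-1][:8]                    # indices of the 8 closest images, like the list above
print(top_k, scores[top_k])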

      "},{"location":"tutorial_nanodb.html#interactive-web-ui","title":"Interactive Web UI","text":"

      Spin up the Gradio server:

      jetson-containers run $(autotag nanodb) \\\n  python3 -m nanodb \\\n    --path /data/nanodb/coco/2017 \\\n    --server --port=7860\n

      Then navigate your browser to http://<IP_ADDRESS>:7860, and you can enter text search queries as well as drag/upload images:

      To use the dark theme, navigate to http://<IP_ADDRESS>:7860/?__theme=dark instead"},{"location":"tutorial_slm.html","title":"Tutorial - Small Language Models (SLM)","text":"

      Small Language Models (SLMs) represent a growing class of language models that have <7B parameters - for example StableLM, Phi-2, and Gemma-2B. Their smaller memory footprint and faster performance make them good candidates for deploying on Jetson Orin Nano. Some are very capable with abilities at a similar level as the larger models, having been trained on high-quality curated datasets.

      This tutorial shows how to run optimized SLMs with quantization using the NanoLLM library and MLC/TVM backend. You can run these models through tools like text-generation-webui and llama.cpp as well, just not as fast - and since the focus of SLMs is reduced computational and memory requirements, here we'll use the most optimized path available. Those shown below have been profiled:

      "},{"location":"tutorial_slm.html#slm-benchmarks","title":"SLM Benchmarks","text":"

• The HuggingFace Open LLM Leaderboard is a collection of multitask benchmarks including reasoning & comprehension, math, coding, history, geography, etc.
• The model's memory footprint includes 4-bit weights and KV cache at full context length (factor in extra for process overhead, library code, etc.)
• The Chat Model is the instruction-tuned variant for chatting with in the commands below, as opposed to the base completion model.

      Based on user interactions, the recommended models to try are stabilityai/stablelm-zephyr-3b and princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT, for having output quality on par with Llama-2-7B and well-optimized neural architectures. These models have also been used as the base for various fine-tunes (for example Nous-Capybara-3B-V1.9) and mini VLMs. Others may not be particularly coherent.

      "},{"location":"tutorial_slm.html#chatting-with-slms","title":"Chatting with SLMs","text":"

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 22GB for nano_llm container image
        • Space for models (>5GB)
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n

      The nano_llm.chat program will automatically download and quantize models from HuggingFace like those listed in the table above:

      jetson-containers run $(autotag nano_llm) \\\npython3 -m nano_llm.chat --api=mlc \\\n--model princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT\n

• For models requiring authentication, use --env HUGGINGFACE_TOKEN=<YOUR-ACCESS-TOKEN>
• Press Ctrl+C twice in succession to exit (once will interrupt bot output)

      This will enter into interactive mode where you chat back and forth using the keyboard (entering reset will clear the chat history)
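If you prefer scripting over the interactive console, the same NanoLLM Python API shown on the NanoLLM page works for these SLMs too. A minimal sketch inside the nano_llm container, using one of the recommended models from above (the quantization setting is an assumption taken from the earlier NanoLLM example; check the NanoLLM docs for the full options):

from nano_llm import NanoLLM

# downloads and quantizes the model with MLC on first use, like nano_llm.chat does
model = NanoLLM.from_pretrained(
    "stabilityai/stablelm-zephyr-3b",
    api="mlc",
    quantization="q4f16_ft",
)

response = model.generate("Once upon a time,", max_new_tokens=128)

for token in response:
    print(token, end="", flush=True)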

      "},{"location":"tutorial_slm.html#automated-prompts","title":"Automated Prompts","text":"

      During testing, you can specify prompts on the command-line that will run sequentially:

      jetson-containers run $(autotag nano_llm) \\\npython3 -m nano_llm.chat --api=mlc \\\n--model stabilityai/stablelm-zephyr-3b \\\n--max-new-tokens 512 \\\n--prompt 'hi, how are you?' \\\n--prompt 'whats the square root of 900?' \\\n--prompt 'can I get a recipie for french onion soup?'\n

      You can also load JSON files containing prompt sequences, like with --prompt /data/prompts/qa.json (the output of which is below)

      "},{"location":"tutorial_slm.html#results","title":"Results","text":"

• The model responses are with 4-bit quantization, and are truncated to 256 tokens for brevity.
• These chat questions are from /data/prompts/qa.json (found in jetson-containers).

      "},{"location":"tutorial_stable-diffusion-xl.html","title":"Tutorial - Stable Diffusion XL","text":"

      Stable Diffusion XL is a newer ensemble pipeline consisting of a base model and refiner that results in significantly enhanced and detailed image generation capabilities. All told, SDXL 1.0 has 6.6 billion model parameters, in comparison to 0.98 billion for the original SD 1.5 model.

      What you need

      1. One of the following Jetson devices (SDXL requires >= ~13GB memory)

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.8GB for container image
        • 12.4GB for SDXL models
      4. Have followed the previous stable-diffusion-webui tutorial and have the webserver container running.

      "},{"location":"tutorial_stable-diffusion-xl.html#downloading-sdxl-models","title":"Downloading SDXL Models","text":"

      Stable Diffusion XL is supported through AUTOMATIC1111's stable-diffusion-webui with some additional settings. First you need to download the SDXL models to your jetson-containers data directory (which is automatically mounted into the container)

      # run these outside of container, and replace CONTAINERS_DIR with the path to the jetson-containers repo on your device\nCONTAINERS_DIR=/path/to/your/jetson-containers\nMODEL_DIR=$CONTAINERS_DIR/data/models/stable-diffusion/models/Stable-diffusion/\n\nsudo chown -R $USER $MODEL_DIR\n\nwget -P $MODEL_DIR https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors\nwget -P $MODEL_DIR https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors\n

      It's assumed that you already have the stable-diffusion-webui container and webserver running from the previous tutorial.

      "},{"location":"tutorial_stable-diffusion-xl.html#sdxl-settings","title":"SDXL Settings","text":"

      After the models have finished downloading, click the \ud83d\udd04 button to refresh the model list in the web UI. Select sd_xl_base_1.0.safetensors from the Stable Diffusion checkpoint drop-down:

      Then under the Generation tab, expand the Refiner section, and select sd_xl_refiner_1.0.safetensors from the drop-down:

      Guidance on other relevant settings:

      • Change the width/height to 1024x1024. SDXL works best at higher resolutions, and using 512x512 often results in more simplistic/cartoonish content. Changing image resolutions impacts the actual scene contents of the image, not just the details.
      • The refiner's Switch at setting dictates the step at which the refiner takes over from the base model. At this point, additional subject content will no longer be added to the scene, but rather its details further refined in the image.
      • Typical Sampling steps are between 20-40 and Switch at is between 0.7-0.9. This takes experimentation to find the best combination for the characteristics of your desired output.
• Extensive negative prompts are not as necessary as they were with SD 1.5 (e.g. low quality, jpeg artifacts, blurry, etc.)
      • CFG Scale controls how closely the model conforms to your prompt versus how creative it is.

      When you get a good image, remember to save your random seed and settings so you can re-generate it later!

      "},{"location":"tutorial_stable-diffusion-xl.html#results","title":"Results","text":"

      photograph of a friendly robot alongside a person climbing a mountain (seed 1576166644, steps 25, switch @ 0.8, cfg scale 15)

      a girl and a boy building a friendly robot in their basement workshop (seed 642273464, steps 25, switch @ 0.9, cfg scale 7)

      small friendly robots playing games with people, in a futuristic Tokyo central park gardens with cherry blossoms and water, coy fish swimming in the water, sunshine (seed 642273464, steps 40, switch @ 0.8, cfg scale 7)

      small friendly robots playing games with people in a futuristic New York City Central Park in autumn, water (seed 642273464, steps 25, switch @ 0.8, cfg scale 7)

      Want to explore using Python APIs to run diffusion models directly? See jetson-containers/stable-diffusion.
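As a starting point for that, here is a rough sketch of generating an SDXL image with the HuggingFace diffusers library (base model only, refiner omitted; this is separate from the stable-diffusion-webui workflow above and assumes diffusers and PyTorch are installed in your container):

import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

image = pipe(
    "photograph of a friendly robot alongside a person climbing a mountain",
    num_inference_steps=25,
    guidance_scale=7.0,
).images[0]

image.save("sdxl_robot.png")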

      "},{"location":"tutorial_stable-diffusion.html","title":"Tutorial - Stable Diffusion","text":"

      Let's run AUTOMATIC1111's stable-diffusion-webui on NVIDIA Jetson to generate images from our prompts!

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.8GB for container image
        • 4.1GB for SD 1.5 model
      "},{"location":"tutorial_stable-diffusion.html#setup-a-container-for-stable-diffusion-webui","title":"Setup a container for stable-diffusion-webui","text":"

      The jetson-containers project provides pre-built Docker images for stable-diffusion-webui. You can clone the repo to use its utilities that will automatically pull/start the correct container for you, or you can do it manually.

      git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n

      Info

      JetsonHacks provides an informative walkthrough video on jetson-containers, showcasing the usage of both the stable-diffusion-webui and text-generation-webui. You can find the complete article with detailed instructions here.

      "},{"location":"tutorial_stable-diffusion.html#how-to-start","title":"How to start","text":"

      Use jetson-containers run and autotag tools to automatically pull or build a compatible container image:

      jetson-containers run $(autotag stable-diffusion-webui)\n

      The container has a default run command (CMD) that will automatically start the webserver like this:

      cd /opt/stable-diffusion-webui && python3 launch.py \\\n  --data=/data/models/stable-diffusion \\\n  --enable-insecure-extension-access \\\n  --xformers \\\n  --listen \\\n  --port=7860\n

      You should see it downloading the model checkpoint on the first run.

      Open your browser and access http://<IP_ADDRESS>:7860

      "},{"location":"tutorial_stable-diffusion.html#results-output-examples","title":"Results / Output Examples","text":""},{"location":"tutorial_stable-diffusion.html#stable-diffusion-xl","title":"Stable Diffusion XL","text":"

      To generate even higher-quality and detailed images, check out the next part of the tutorial that uses the latest Stable Diffusion XL models!

      Want to explore using Python APIs to run diffusion models directly? See jetson-containers/stable-diffusion.

      "},{"location":"tutorial_text-generation.html","title":"Tutorial - text-generation-webui","text":"

Interact with a local AI assistant by running a LLM with oobabooga's text-generation-webui on NVIDIA Jetson!

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)\u26a0\ufe0f1

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.2GB for container image
• Space for models
      "},{"location":"tutorial_text-generation.html#set-up-a-container-for-text-generation-webui","title":"Set up a container for text-generation-webui","text":"

The jetson-containers project provides pre-built Docker images for text-generation-webui along with all of the loader APIs built with CUDA enabled (llama.cpp, ExLlama, AutoGPTQ, Transformers, etc.). You can clone the repo to use its utilities that will automatically pull/start the correct container for you, or you can do it manually.

      git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n

      Info

      JetsonHacks provides an informative walkthrough video on jetson-containers, showcasing the usage of both the stable-diffusion-webui and text-generation-webui. You can find the complete article with detailed instructions here.

      "},{"location":"tutorial_text-generation.html#how-to-start","title":"How to start","text":"

      Use jetson-containers run and autotag tools to automatically pull or build a compatible container image:

      jetson-containers run $(autotag text-generation-webui)\n

      The container has a default run command (CMD) that will automatically start the webserver like this:

      cd /opt/text-generation-webui && python3 server.py \\\n  --model-dir=/data/models/text-generation-webui \\\n  --chat \\\n  --listen\n

      Open your browser and access http://<IP_ADDRESS>:7860.

      "},{"location":"tutorial_text-generation.html#download-a-model-on-web-ui","title":"Download a model on web UI","text":"

      See the oobabooga documentation for instructions for downloading models - either from within the web UI, or using download-model.py

      jetson-containers run --workdir=/opt/text-generation-webui $(./autotag text-generation-webui) /bin/bash -c \\\n'python3 download-model.py --output=/data/models/text-generation-webui TheBloke/Llama-2-7b-Chat-GPTQ'\n

      From within the web UI, select Model tab and navigate to \"Download model or LoRA\" section.

      You can find text generation models on Hugging Face Hub, then enter the Hugging Face username/model path (which you can have copied to your clipboard from the Hub). Then click the Download button.

      "},{"location":"tutorial_text-generation.html#gguf-models","title":"GGUF models","text":"

      The fastest oobabooga model loader to use is currently llama.cpp with 4-bit quantized GGUF models.

You can download a single model file for a particular quantization, like *.Q4_K_M.gguf. Input the file name and hit the Download button.

Model                            Quantization                    Memory (MB)
TheBloke/Llama-2-7b-Chat-GGUF    llama-2-7b-chat.Q4_K_M.gguf     5,268
TheBloke/Llama-2-13B-chat-GGUF   llama-2-13b-chat.Q4_K_M.gguf    8,609
TheBloke/LLaMA-30b-GGUF          llama-30b.Q4_K_S.gguf           19,045
TheBloke/Llama-2-70B-chat-GGUF   llama-2-70b-chat.Q4_K_M.gguf    37,655

      Info

      "},{"location":"tutorial_text-generation.html#model-selection-for-jetson-orin-nano","title":"Model selection for Jetson Orin Nano","text":"

Jetson Orin Nano Developer Kit has only 8GB RAM for both CPU (system) and GPU, so you need to pick a model that fits in the RAM size - see the Model Size section below. The 7B models with 4-bit quantization are the ones to use on Jetson Orin Nano. Make sure you go through the RAM optimization steps before attempting to load such a model on Jetson Orin Nano.

      "},{"location":"tutorial_text-generation.html#load-a-model","title":"Load a model","text":"

      After you have downloaded a model, click the \ud83d\udd04 button to refresh your model list, and select the model you want to use.

      For a GGUF model, remember to

      • Set n-gpu-layers to 128
• Set n_gqa to 8 if you are using Llama-2-70B (on Jetson AGX Orin 64GB)

      Then click the Load button.

      "},{"location":"tutorial_text-generation.html#chat-template","title":"Chat Template","text":"

If you're using a Llama model fine-tuned for chat, like the models listed above (except for LLaMA-30b), you need to use the oobabooga Instruct mode and set the template. On the Parameters tab, go to the Instruction Template sub-tab, then select Llama-v2 from the Instruction Template drop-down (or Vicuna, Guanaco, etc. if you are using one of those models)

      For the base text completion models (like LLaMA-30b), use the Default or Notebook tab.

      Selecting the right chat template will make sure the model is being prompted correctly - you can also change the system prompt in the Context box to alter the agent's personality and behavior. There are a lot of other settings under the Generation tab, like the maximum length it should output per reply, and token sampling parameters like temperature and top_p for controlling randomness.

      Then change back to the Chat tab, and under the mode section, make sure Instruct is selected (confusingly, not chat mode). Then you can start chatting with the LLM!

      "},{"location":"tutorial_text-generation.html#results","title":"Results","text":""},{"location":"tutorial_text-generation.html#things-to-do-with-your-llm","title":"Things to do with your LLM","text":"

Here are some common test prompts for coding, math, history, etc. You can also ask it about geography, travel, nature, recipes, fixing things, general life advice, and practically everything else. Also, Llama-2 is quite playful and likes to play games to test its logic abilities!

      >> What games do you like to play?\n\nI'm a large language model, so I can play text-based games and answer questions on a wide variety of topics. Here are some game ideas:\n\n- 20 Questions:  You or I can think of an object, and the other player tries to guess what it is by asking up to 20 yes-or-no questions.  The goal is to guess the object in as few questions as possible.\n- Hangman: I can think of a word or phrase and you try to guess it by suggesting letters. Each incorrect guess will bring the hangman one step closer to being complete!\n- Word association: I'll give you a word and you have to come up with a word that is related to it. We can keep playing back and forth as long as you'd like!\n- Trivia: I can provide questions on a wide range of topics, from history to science to pop culture. See how many questions you can answer correctly!\n- Storytelling: I can start telling a story and then stop at a cliffhanger. You can then try to guess what happens next or even take over the storytelling and continue it in your own\n- Jokes: I love to tell jokes and make people laugh with my \"Dad humor\"!  Knock knock!  *giggles*\n
      "},{"location":"tutorial_text-generation.html#model-size-tested","title":"Model size tested","text":"

      With llama.cpp, GGUF model, 4-bit quantization:

Model size   Jetson AGX Orin 64GB   Jetson AGX Orin 32GB   Jetson Orin Nano 8GB
70B model    ✅
30B model    ✅                      ✅
13B model    ✅                      ✅
7B model     ✅                      ✅                      ✅

      Want to explore using Python APIs to run LLMs directly? See jetson-containers for its LLM related packages and containers.
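Following up on the Python APIs mentioned above, here is a minimal sketch of loading one of the 4-bit GGUF chat models from the table directly with the llama-cpp-python bindings (the model path is hypothetical; adjust it to wherever your GGUF file was downloaded):

from llama_cpp import Llama

llm = Llama(
    model_path="/data/models/text-generation-webui/llama-2-7b-chat.Q4_K_M.gguf",
    n_gpu_layers=999,   # offload all layers to the GPU
    n_ctx=2048,
)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What games do you like to play?"}],
    max_tokens=256,
    temperature=0.7,
)

print(out["choices"][0]["message"]["content"])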

      1. Limited to 7B model (4-bit quantized).\u00a0\u21a9

      "},{"location":"tutorial_whisper.html","title":"Tutorial - Whisper","text":"

Let's run OpenAI's Whisper, a pre-trained model for automatic speech recognition, on Jetson!

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.1 GB for whisper container image
        • Space for checkpoints
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"tutorial_whisper.html#how-to-start","title":"How to start","text":"

Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      jetson-containers run $(autotag whisper)\n

      The container has a default run command (CMD) that will automatically start the Jupyter Lab server, with SSL enabled.

      Open your browser and access https://<IP_ADDRESS>:8888.

      Attention

      Note it is https (not http).

An HTTPS (SSL) connection is needed to allow the ipywebrtc widget to access your microphone (for record-and-transcribe.ipynb).

      You will see a warning message like this.

      Press \"Advanced\" button and then click on \"Proceed to (unsafe)\" link to proceed to the Jupyter Lab web interface.

      The default password for Jupyter Lab is nvidia.

      "},{"location":"tutorial_whisper.html#run-jupyter-notebooks","title":"Run Jupyter notebooks","text":"

The Whisper repo comes with demo Jupyter notebooks, which you can find under the /notebooks/ directory.

jetson-containers also adds a convenient notebook (record-and-transcribe.ipynb) that records an audio sample from within Jupyter and runs transcription on it.

      "},{"location":"tutorial_whisper.html#record-and-transcribeipynb","title":"record-and-transcribe.ipynb","text":"

This notebook lets you record your own audio sample using your PC's microphone and applies Whisper's medium model to transcribe the audio sample.

      It uses Jupyter notebook/lab's ipywebrtc extension to record an audio sample on your web browser.
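For reference, the transcription step in the notebook boils down to the standard Whisper Python API; a minimal standalone sketch (the audio filename is hypothetical):

import whisper

model = whisper.load_model("medium")        # same model size the notebook uses
result = model.transcribe("recording.wav")  # path to your recorded audio sample
print(result["text"])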

      Attention

When you click the \u23fa button, your web browser may show a pop-up asking you to allow it to use your microphone. Be sure to allow the access.

      Final check

      Once done, if you click on the \"\u26a0 Not secure\" part in the URL bar, you should see something like this.

      "},{"location":"tutorial_whisper.html#result","title":"Result","text":"

Once you go through all the steps, you should see the transcription result as text like this.

      "},{"location":"vit/index.html","title":"Vision Transformers","text":"

You can run the following Vision Transformer models and applications quickly on Jetson by using jetson-containers.

      "},{"location":"vit/index.html#efficient-vit","title":"Efficient ViT","text":""},{"location":"vit/index.html#nanosam","title":"NanoSAM","text":""},{"location":"vit/index.html#nanoowl","title":"NanoOWL","text":""},{"location":"vit/index.html#sam","title":"SAM","text":""},{"location":"vit/index.html#tam","title":"TAM","text":""},{"location":"vit/tutorial_efficientvit.html","title":"Tutorial - EfficientViT","text":"

      Let's run MIT Han Lab's EfficientViT on Jetson!

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 10.9 GB for efficientvit container image
        • Space for checkpoints
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"vit/tutorial_efficientvit.html#how-to-start","title":"How to start","text":"

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      jetson-containers run $(autotag efficientvit)\n
      "},{"location":"vit/tutorial_efficientvit.html#usage-of-efficientvit","title":"Usage of EfficientViT","text":"

      The official EfficientViT repo shows the complete usage information: https://github.com/mit-han-lab/efficientvit#usage

      "},{"location":"vit/tutorial_efficientvit.html#run-examplebenchmark","title":"Run example/benchmark","text":"

Inside the container, a small benchmark script benchmark.py is added under the /opt/efficientvit directory by the jetson-containers build process.

It tests EfficientViT-L2-SAM in bounding box mode, so we can use this as an example to verify the output.

      "},{"location":"vit/tutorial_efficientvit.html#download-l2pt-model","title":"Download l2.pt model","text":"
      mkdir -p /data/models/efficientvit/sam/\ncd /data/models/efficientvit/sam/\nwget https://huggingface.co/han-cai/efficientvit-sam/resolve/main/l2.pt\n

The downloaded checkpoint file is stored in the /data/ directory that is mounted from the Docker host.

      "},{"location":"vit/tutorial_efficientvit.html#run-benchmark-script","title":"Run benchmark script","text":"
      cd /opt/efficientvit\npython3 ./benchmark.py\n

      At the end you should see a summary like the following.

      AVERAGE of 2 runs:\n  encoder --- 0.062 sec\n  latency --- 0.083 sec\nMemory consumption :  3419.68 MB\n
      "},{"location":"vit/tutorial_efficientvit.html#check-the-outputresult","title":"Check the output/result","text":"

      The output image file (of the last inference result) is stored as /data/benchmarks/efficientvit_sam_demo.png.

It is stored under the /data/ directory that is mounted from the Docker host, so you can go back to your host machine and check the jetson-containers/data/benchmarks/ directory.

      You should find the output like this.

      "},{"location":"vit/tutorial_nanoowl.html","title":"Tutorial - NanoOWL","text":"

      Let's run NanoOWL, OWL-ViT optimized to run real-time on Jetson with NVIDIA TensorRT.

      What you need

      1. One of the following Jetson:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 7.2 GB for container image
• Space for models
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"vit/tutorial_nanoowl.html#how-to-start","title":"How to start","text":"

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      jetson-containers run $(autotag nanoowl)\n
      "},{"location":"vit/tutorial_nanoowl.html#how-to-run-the-tree-prediction-live-camera-example","title":"How to run the tree prediction (live camera) example","text":"
      1. Ensure you have a camera device connected

        ls /dev/video*\n

        If no video device is found, exit from the container and check if you can see a video device on the host side.

      2. Launch the demo

        cd examples/tree_demo\npython3 tree_demo.py ../../data/owl_image_encoder_patch32.engine\n

        Info

        If it fails to find or load the TensorRT engine file, build the TensorRT engine for the OWL-ViT vision encoder on your Jetson device.

        python3 -m nanoowl.build_image_encoder_engine \\\ndata/owl_image_encoder_patch32.engine\n
3. Open your browser and go to http://<ip address>:7860

      4. Type whatever prompt you like to see what works!

        Here are some examples

        • Example: [a face [a nose, an eye, a mouth]]
        • Example: [a face (interested, yawning / bored)]
        • Example: (indoors, outdoors)
      "},{"location":"vit/tutorial_nanoowl.html#result","title":"Result","text":""},{"location":"vit/tutorial_nanosam.html","title":"Tutorial - NanoSAM","text":"

Let's run NVIDIA's NanoSAM to check out the performance gain achieved through distillation.

      What you need

      1. One of the following Jetson:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.3GB for container image
• Space for models
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"vit/tutorial_nanosam.html#how-to-start","title":"How to start","text":"

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      jetson-containers run $(autotag nanosam)\n
      "},{"location":"vit/tutorial_nanosam.html#run-examples","title":"Run examples","text":"

Inside the container, you can move to the /opt/nanosam directory to go through all the examples demonstrated in the repo.

      cd /opt/nanosam\n

      To run the \"Example 1 - Segment with bounding box\":

      python3 examples/basic_usage.py \\\n    --image_encoder=\"data/resnet18_image_encoder.engine\" \\\n    --mask_decoder=\"data/mobile_sam_mask_decoder.engine\"\n

      The result is saved under /opt/nanosam/data/basic_usage_out.jpg.

To check it on your host machine, you can copy it into the /data directory of the container, which is mounted from the host.

      cp data/basic_usage_out.jpg /data/\n

      Then you can go to your host system, and find the file under jetson-containers/data/basic_usage_out.jpg

      "},{"location":"vit/tutorial_nanosam.html#results","title":"Results","text":""},{"location":"vit/tutorial_sam.html","title":"Tutorial - SAM (Segment Anything)","text":"

      Let's run Meta's SAM on NVIDIA Jetson.

      What you need

      1. One of the following Jetson devices:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB) Jetson Orin NX (16GB) Jetson Orin Nano (8GB)\u26a0\ufe0f1

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x) JetPack 6 (L4T r36.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.8GB for container image
• Space for models
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"vit/tutorial_sam.html#how-to-start","title":"How to start","text":"

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      jetson-containers run $(autotag sam)\n

      The container has a default run command (CMD) that will automatically start the Jupyter Lab server.

      Open your browser and access http://<IP_ADDRESS>:8888.

      The default password for Jupyter Lab is nvidia.

      "},{"location":"vit/tutorial_sam.html#run-jupyter-notebook","title":"Run Jupyter notebook","text":"

In Jupyter Lab, navigate to notebooks and open the automatic_mask_generator_example notebook.

      Create a new cell at the top, insert the model download command below and run the cell.

      !wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth\n

      Then go through executing all the cells below Set-up.
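If you'd rather script it than step through the notebook, the core of automatic mask generation with the segment-anything package looks roughly like this (using the checkpoint downloaded above; the input image path is hypothetical):

import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

image = cv2.cvtColor(cv2.imread("input.jpg"), cv2.COLOR_BGR2RGB)

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
sam.to("cuda")

mask_generator = SamAutomaticMaskGenerator(sam)
masks = mask_generator.generate(image)   # list of dicts with 'segmentation', 'area', 'bbox', ...
print(f"generated {len(masks)} masks")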

      "},{"location":"vit/tutorial_sam.html#results","title":"Results","text":"
1. The biggest vit_h (2.4GB) model may not run due to OOM, but vit_l (1.1GB) runs on Jetson Orin Nano. ↩

      "},{"location":"vit/tutorial_tam.html","title":"Tutorial - SAM (Segment Anything)","text":"

      Let's run TAM to perform Segment Anything on videos on NVIDIA Jetson.

      What you need

      1. One of the following Jetson:

        Jetson AGX Orin (64GB) Jetson AGX Orin (32GB)

      2. Running one of the following versions of JetPack:

        JetPack 5 (L4T r35.x)

      3. Sufficient storage space (preferably with NVMe SSD).

        • 6.8GB for container image
• Space for models
      4. Clone and setup jetson-containers:

        git clone https://github.com/dusty-nv/jetson-containers\nbash jetson-containers/install.sh\n
      "},{"location":"vit/tutorial_tam.html#how-to-start","title":"How to start","text":"

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      jetson-containers run $(autotag tam)\n

      The container has a default run command (CMD) that will automatically start TAM's web server.

      Open your browser and access http://<IP_ADDRESS>:12212.

      "},{"location":"vit/tutorial_tam.html#tam-web-ui","title":"TAM web UI","text":"

      Check out the official tutorial to learn how to operate the web UI.

      "},{"location":"vit/tutorial_tam.html#results","title":"Results","text":""},{"location":"vit/tutorial_tam.html#troubleshooting","title":"Troubleshooting","text":""},{"location":"vit/tutorial_tam.html#filenotfounderror-errno-2-no-such-file-or-directory-checkpointse2fgvi-hq-cvpr22pth","title":"FileNotFoundError: [Errno 2] No such file or directory: './checkpoints/E2FGVI-HQ-CVPR22.pth'","text":"

You may find that the TAM app fails to download the checkpoint file E2FGVI-HQ-CVPR22.pth.

      Downloading checkpoints from Google Drive... tips: If you cannot see the progress bar, please try to download it manuall               and put it in the checkpointes directory. E2FGVI-HQ-CVPR22.pth: https://github.com/MCG-NKU/E2FGVI(E2FGVI-HQ model)\nAccess denied with the following error:\n\n        Cannot retrieve the public link of the file. You may need to change\n        the permission to 'Anyone with the link', or have had many accesses. \n\nYou may still be able to access the file from the browser:\n\n         https://drive.google.com/uc?id=10wGdKSUOie0XmCr8SQ2A2FeDe-mfn5w3 \n

      You can manually download the checkpoint file on your Docker host machine.

      cd jetson-containers/\npip install gdown\nsource ~/.profile\ngdown https://drive.google.com/uc?id=10wGdKSUOie0XmCr8SQ2A2FeDe-mfn5w3 \nmv E2FGVI-HQ-CVPR22.pth ./data/models/tam/\n

      And you can try running the TAM container.

      jetson-containers run $(autotag tam)\n
      "}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..0f8724ef --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 00000000..4362ee8d Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/tips_ram-optimization.html b/tips_ram-optimization.html new file mode 100644 index 00000000..b1b87a02 --- /dev/null +++ b/tips_ram-optimization.html @@ -0,0 +1,1361 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 🔖 Memory optimization - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

RAM Optimization

Running an LLM requires a large amount of RAM.

Especially if you are on a Jetson Orin Nano, which only has 8GB of RAM, it is crucial to leave as much RAM as possible available for models.

Here we share a couple of ways to optimize the system RAM usage.

Disabling the Desktop GUI

If you use your Jetson remotely through SSH, you can disable the Ubuntu desktop GUI. This will free up extra memory that the window manager and desktop use (around ~800MB for Unity/GNOME).

You can disable the desktop temporarily, run commands in the console, and then restart the desktop when desired:

$ sudo init 3     # stop the desktop
# log your user back into the console (Ctrl+Alt+F1, F2, etc.)
$ sudo init 5     # restart the desktop

      If you wish to make this persistent across reboots, you can use the following commands to change the boot-up behavior:

      +
        +
      • +

        To disable desktop on boot

        +
        sudo systemctl set-default multi-user.target
        +
        +
      • +
      • +

        To enable desktop on boot

        +
        sudo systemctl set-default graphical.target
        +
        +
      • +
      +

      Disabling misc services

      +
      sudo systemctl disable nvargus-daemon.service
      +
      +
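Before disabling anything else, it can help to see which services are currently running so you only disable ones you know you don't need (an optional check):

systemctl list-units --type=service --state=running   # list active services
free -h                                                # current memory usage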

      Mounting Swap

      +

If you're building containers or working with large models, it's advisable to mount swap space (typically sized in proportion to the amount of memory on the board). Run these commands to disable ZRAM and create a swap file:

      +
      +

      If you have NVMe SSD storage available, it's preferred to allocate the swap file on the NVMe SSD.

      +
      +
      sudo systemctl disable nvzramconfig
      +sudo fallocate -l 16G /ssd/16GB.swap
      +sudo mkswap /ssd/16GB.swap
      +sudo swapon /ssd/16GB.swap
      +
      +

      Then add the following line to the end of /etc/fstab to make the change persistent:

      +
      /ssd/16GB.swap  none  swap  sw 0  0
      +
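After rebooting (or right after the swapon command above), you can verify that the swap file is active:

swapon --show     # the 16GB swap file should be listed
free -h           # the Swap row should reflect the new size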
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tips_ssd-docker.html b/tips_ssd-docker.html new file mode 100644 index 00000000..eecd8156 --- /dev/null +++ b/tips_ssd-docker.html @@ -0,0 +1,1639 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 🔖 SSD + Docker - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tips - SSD + Docker

      +

Once you have your Jetson set up by flashing the latest Jetson Linux (L4T) BSP on it, or by flashing the SD card with the whole JetPack image, and before embarking on testing out all the great generative AI applications using jetson-containers, you want to make sure you have ample storage space for all the containers and the models you will download.

      +

We are going to show how you can install an SSD on your Jetson and set it up for Docker.

      +

      SSD

      +

      Physical installation

      +
        +
      1. Unplug power and any peripherals from the Jetson developer kit.
      2. +
      3. Physically install an NVMe SSD card on the carrier board of your Jetson developer kit, making sure to properly seat the connector and secure with the screw.
      4. +
      5. Reconnect any peripherals, and then reconnect the power supply to turn on the Jetson developer kit.
      6. +
      7. +

Once the system is up, verify that your Jetson identifies a new memory controller on the PCI bus:

        +
        lspci
        +
        +

        The output should look like the following:

        +
        0007:01:00.0 Non-Volatile memory controller: Marvell Technology Group Ltd. Device 1322 (rev 02)
        +
        +
      8. +
      +

      Format and set up auto-mount

      +
        +
      1. +

        Run lsblk to find the device name.

        +
        lsblk
        +
        +

        The output should look like the following:

        +
        NAME         MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
        +loop0          7:0    0    16M  1 loop 
        +mmcblk1      179:0    0  59.5G  0 disk 
        +├─mmcblk1p1  179:1    0    58G  0 part /
        +├─mmcblk1p2  179:2    0   128M  0 part 
        +├─mmcblk1p3  179:3    0   768K  0 part 
        +├─mmcblk1p4  179:4    0  31.6M  0 part 
        +├─mmcblk1p5  179:5    0   128M  0 part 
        +├─mmcblk1p6  179:6    0   768K  0 part 
        +├─mmcblk1p7  179:7    0  31.6M  0 part 
        +├─mmcblk1p8  179:8    0    80M  0 part 
        +├─mmcblk1p9  179:9    0   512K  0 part 
        +├─mmcblk1p10 179:10   0    64M  0 part 
        +├─mmcblk1p11 179:11   0    80M  0 part 
        +├─mmcblk1p12 179:12   0   512K  0 part 
        +├─mmcblk1p13 179:13   0    64M  0 part 
        +└─mmcblk1p14 179:14   0 879.5M  0 part 
        +zram0        251:0    0   1.8G  0 disk [SWAP]
        +zram1        251:1    0   1.8G  0 disk [SWAP]
        +zram2        251:2    0   1.8G  0 disk [SWAP]
        +zram3        251:3    0   1.8G  0 disk [SWAP]
        +nvme0n1      259:0    0 238.5G  0 disk 
        +
        +

        Identify the device corresponding to your SSD. In this case, it is nvme0n1.

        +
      2. +
      3. +

        Format the SSD, create a mount point, and mount it to the filesystem.

        +
        sudo mkfs.ext4 /dev/nvme0n1
        +
        +
        +

        You can choose any name for the mount point directory. We use /ssd here, but in jetson-containers' setup.md documentation, /mnt is used.

        +
        +
        sudo mkdir /ssd
        +
        +
        sudo mount /dev/nvme0n1 /ssd
        +
        +
      4. +
      5. +

        In order to ensure that the mount persists after boot, add an entry to the fstab file:

        +

        First, identify the UUID for your SSD:

        +
        lsblk -f
        +
        +

        Then, add a new entry to the fstab file:

        +
        sudo vi /etc/fstab
        +
        +

        Insert the following line, replacing the UUID with the value found from lsblk -f:

        +
        UUID=************-****-****-****-******** /ssd/ ext4 defaults 0 2
        +
        +
      6. +
      7. +

Finally, change the ownership of the /ssd directory. (A quick way to verify the fstab entry without rebooting is shown after this list.)

        +
        sudo chown ${USER}:${USER} /ssd
        +
        +
      8. +
      +
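To verify the fstab entry from the steps above without rebooting, you can unmount the SSD and let the system mount everything listed in /etc/fstab:

sudo umount /ssd     # skip this if the SSD is not currently mounted
sudo mount -a        # mount everything listed in /etc/fstab
df -h /ssd           # confirm the SSD is mounted at /ssd again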

      Docker

      +
        +
      1. +

        Install nvidia-container package.

        +
        +

        Note: If you used an NVIDIA-supplied SD card image to flash your SD card, all necessary JetPack components (including nvidia-containers) and Docker are already pre-installed, so this step can be skipped.

        +
        +
        sudo apt update
        +sudo apt install -y nvidia-container
        +
        +
        +

        JetPack 6.0 DP users

        +

If you flash Jetson Linux (L4T) R36.2 (JetPack 6.0 DP) on your Jetson using SDK Manager, note that installing nvidia-container with apt no longer automatically installs Docker on JetPack 6.0.

        +

        Therefore, you need to run the following to manually install Docker and set it up.

        +
        sudo apt update
        +sudo apt install -y nvidia-container curl
        +curl https://get.docker.com | sh && sudo systemctl --now enable docker
        +sudo nvidia-ctk runtime configure --runtime=docker
        +
        +
        +
      2. +
      3. +

        Restart the Docker service and add your user to the docker group, so that you don't need to use the command with sudo.

        +
        sudo systemctl restart docker
        +sudo usermod -aG docker $USER
        +newgrp docker
        +
        +
      4. +
      5. +

        Add default runtime in /etc/docker/daemon.json

        +
        sudo vi /etc/docker/daemon.json
        +
        +

        Insert the "default-runtime": "nvidia" line as following:

        +
        {
        +    "runtimes": {
        +        "nvidia": {
        +            "path": "nvidia-container-runtime",
        +            "runtimeArgs": []
        +        }
        +    },
        +    "default-runtime": "nvidia"
        +}
        +
        +
      6. +
      7. +

        Restart Docker

        +
        sudo systemctl daemon-reload && sudo systemctl restart docker
        +
        +
      8. +
      +

      Migrate Docker directory to SSD

      +

      Now that the SSD is installed and available on your device, you can use the extra storage capacity to hold the storage-demanding Docker directory.

      +
        +
      1. +

        Stop the Docker service.

        +
        sudo systemctl stop docker
        +
        +
      2. +
      3. +

        Move the existing Docker folder

        +
        sudo du -csh /var/lib/docker/ && \
        +    sudo mkdir /ssd/docker && \
        +    sudo rsync -axPS /var/lib/docker/ /ssd/docker/ && \
        +    sudo du -csh  /ssd/docker/ 
        +
        +
      4. +
      5. +

        Edit /etc/docker/daemon.json

        +
        sudo vi /etc/docker/daemon.json
        +
        +

        Insert "data-root" line like the following.

        +
        {
        +    "runtimes": {
        +        "nvidia": {
        +            "path": "nvidia-container-runtime",
        +            "runtimeArgs": []
        +        }
        +    },
        +    "default-runtime": "nvidia",
        +    "data-root": "/ssd/docker"
        +}
        +
        +
      6. +
      7. +

        Rename the old Docker data directory

        +
        sudo mv /var/lib/docker /var/lib/docker.old
        +
        +
      8. +
      9. +

        Restart the docker daemon

        +
        sudo systemctl daemon-reload && \
        +    sudo systemctl restart docker && \
        +    sudo journalctl -u docker
        +
        +
      10. +
      +

      Test Docker on SSD

      +
        +
      1. +

        [Terminal 1] First, open a terminal to monitor the disk usage while pulling a Docker image.

        +
        watch -n1 df 
        +
        +
      2. +
      3. +

        [Terminal 2] Next, open a new terminal and start Docker pull.

        +
        docker pull nvcr.io/nvidia/l4t-base:r35.2.1
        +
        +
      4. +
      5. +

        [Terminal 1] Observe that the disk usage on /ssd goes up as the container image is downloaded and extracted.

        +
        ~$ docker image ls
        +REPOSITORY                  TAG       IMAGE ID       CREATED        SIZE
        +nvcr.io/nvidia/l4t-base     r35.2.1   dc07eb476a1d   7 months ago   713MB
        +
        +
      6. +
      +

      Final Verification

      +

      Reboot your Jetson, and verify that you observe the following:

      +
      ~$ sudo blkid | grep nvme
      +/dev/nvme0n1: UUID="9fc06de1-7cf3-43e2-928a-53a9c03fc5d8" TYPE="ext4"
      +
      +~$ df -h
      +Filesystem      Size  Used Avail Use% Mounted on
      +/dev/mmcblk1p1  116G   18G   94G  16% /
      +none            3.5G     0  3.5G   0% /dev
      +tmpfs           3.6G  108K  3.6G   1% /dev/shm
      +tmpfs           734M   35M  699M   5% /run
      +tmpfs           5.0M  4.0K  5.0M   1% /run/lock
      +tmpfs           3.6G     0  3.6G   0% /sys/fs/cgroup
      +tmpfs           734M   88K  734M   1% /run/user/1000
      +/dev/nvme0n1    458G  824M  434G   1% /ssd
      +
      +~$ docker info | grep Root
      + Docker Root Dir: /ssd/docker
      +
      +~$ sudo ls -l /ssd/docker/
      +total 44
      +drwx--x--x  4 root root 4096 Mar 22 11:44 buildkit
      +drwx--x---  2 root root 4096 Mar 22 11:44 containers
      +drwx------  3 root root 4096 Mar 22 11:44 image
      +drwxr-x---  3 root root 4096 Mar 22 11:44 network
      +drwx--x--- 13 root root 4096 Mar 22 16:20 overlay2
      +drwx------  4 root root 4096 Mar 22 11:44 plugins
      +drwx------  2 root root 4096 Mar 22 16:19 runtimes
      +drwx------  2 root root 4096 Mar 22 11:44 swarm
      +drwx------  2 root root 4096 Mar 22 16:20 tmp
      +drwx------  2 root root 4096 Mar 22 11:44 trust
      +drwx-----x  2 root root 4096 Mar 22 16:19 volumes
      +
      +~$ sudo du -chs /ssd/docker/
      +752M    /ssd/docker/
      +752M    total
      +
      +~$ docker info | grep -e "Runtime" -e "Root"
      + Runtimes: io.containerd.runtime.v1.linux nvidia runc io.containerd.runc.v2
      + Default Runtime: nvidia
      + Docker Root Dir: /ssd/docker
      +
      +

      Your Jetson is now set up with the SSD!

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/try.html b/try.html new file mode 100644 index 00000000..d1a9b19c --- /dev/null +++ b/try.html @@ -0,0 +1,1232 @@ + + + + + + + + + + + + + + + + + + + + + Try - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      + +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial-intro.html b/tutorial-intro.html new file mode 100644 index 00000000..1cb40e9a --- /dev/null +++ b/tutorial-intro.html @@ -0,0 +1,1614 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Introduction - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - Introduction

      +

      Overview

      +

Our tutorials are divided into categories based roughly on model modality: the type of data to be processed or generated.

      +

      Text (LLM)

      + + + + + + + + + + + + + + + + + + + + + + + + + +
text-generation-webuiInteract with a local AI assistant by running an LLM with oobabooga's text-generation-webui
      llamaspeakTalk live with Llama using Riva ASR/TTS, and chat about images with Llava!
      Small LLM (SLM)Deploy Small Language Models (SLM) with reduced memory usage and higher throughput.
      API ExamplesLearn how to write Python code for doing LLM inference using popular APIs.
      +

      Text + Vision (VLM)

      +

Give your locally running LLM access to vision!

      + + + + + + + + + + + + + + + + + + + + + + + + + +
Mini-GPT4Mini-GPT4, an open-source model that demonstrates vision-language capabilities.
      LLaVALarge Language and Vision Assistant, multimodal model that combines a vision encoder and LLM for visual and language understanding.
      Live LLaVARun multimodal models interactively on live video streams over a repeating set of prompts.
      NanoVLMUse mini vision/language models and the optimized multimodal pipeline for live streaming.
      +

      Image Generation

      + + + + + + + + + + + + + + + + + +
      Stable DiffusionRun AUTOMATIC1111's stable-diffusion-webui to generate images from prompts
      Stable Diffusion XLA newer ensemble pipeline consisting of a base model and refiner that results in significantly enhanced and detailed image generation capabilities.
      +

      Vision Transformers (ViT)

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      EfficientVITMIT Han Lab's EfficientViT, Multi-Scale Linear Attention for High-Resolution Dense Prediction
      NanoOWLOWL-ViT optimized to run real-time on Jetson with NVIDIA TensorRT
      NanoSAMNanoSAM, SAM model variant capable of running in real-time on Jetson
      SAMMeta's SAM, Segment Anything model
      TAMTAM, Track-Anything model, is an interactive tool for video object tracking and segmentation
      +

      Vector Database

      + + + + + + + + + + + + + +
NanoDBInteractive demo to witness the impact of a vector database that handles multimodal data
      +

      Audio

      + + + + + + + + + + + + + + + + + +
      AudioCraftMeta's AudioCraft, to produce high-quality audio and music
      WhisperOpenAI's Whisper, pre-trained model for automatic speech recognition (ASR)
      +

      Metropolis Microservices

      + + + + + + + + + + + + + +
      First StepsGet Metropolis Microservices up & running on Jetson with NVStreamer and AI NVR capabilities.
      +

      About NVIDIA Jetson

      +
      +

      Note

      +

      We are mainly targeting Jetson Orin generation devices for deploying the latest LLMs and generative AI models.

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Jetson AGX Orin 64GB Developer KitJetson AGX Orin Developer KitJetson Orin Nano Developer Kit


      GPU2048-core NVIDIA Ampere architecture GPU with 64 Tensor Cores1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores
      RAM
      (CPU+GPU)
      64GB32GB8GB
      Storage64GB eMMC (+ NVMe SSD)microSD card (+ NVMe SSD)
      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_api-examples.html b/tutorial_api-examples.html new file mode 100644 index 00000000..2a77bd8c --- /dev/null +++ b/tutorial_api-examples.html @@ -0,0 +1,1447 @@ + + + + + + + + + + + + + + + + + + + + + + + + + API Examples - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - API Examples

      +

It's good to know how to write the code for generating text with LLM inference, along with ancillary concerns like tokenization, chat templates, and prompting. On this page we give Python examples of running various LLM APIs, and their benchmarks.

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35) +JetPack 6 (L4T r36)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 22GB for l4t-text-generation container image
        • +
        • Space for models (>10GB)
        • +
        +
      6. +
      +
      +

      Transformers

      +

      The HuggingFace Transformers API is the de-facto API that models are released for, often serving as the reference implementation. It's not terribly fast, but it does have broad model support, and also supports quantization (AutoGPTQ, AWQ). This uses streaming:

      +
      from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
      +from threading import Thread
      +
      +model_name='meta-llama/Llama-2-7b-chat-hf'
      +model = AutoModelForCausalLM.from_pretrained(model_name, device_map='cuda')
      +
      +tokenizer = AutoTokenizer.from_pretrained(model_name)
      +streamer = TextIteratorStreamer(tokenizer)
      +
      +prompt = [{'role': 'user', 'content': 'Can I get a recipe for French Onion soup?'}]
      +inputs = tokenizer.apply_chat_template(
      +    prompt,
      +    add_generation_prompt=True,
      +    return_tensors='pt'
      +).to(model.device)
      +
      +Thread(target=lambda: model.generate(inputs, max_new_tokens=256, streamer=streamer)).start()
      +
      +for text in streamer:
      +    print(text, end='', flush=True)
      +
      +

      To run this (it can be found here), you can mount a directory containing the script or your jetson-containers directory:

      +
      jetson-containers run --volume $PWD/packages/llm:/mount --workdir /mount \
      +  $(autotag l4t-text-generation) \
      +    python3 transformers/test.py
      +
      +

We use the l4t-text-generation container because it includes the quantization libraries in addition to Transformers, for running the quantized versions of the models like TheBloke/Llama-2-7B-Chat-GPTQ.

      +

      Benchmarks

      +

      The huggingface-benchmark.py script will benchmark the models:

      +
      ./run.sh --volume $PWD/packages/llm/transformers:/mount --workdir /mount \
      +  $(./autotag l4t-text-generation) \
      +    python3 huggingface-benchmark.py --model meta-llama/Llama-2-7b-chat-hf
      +
      +
      * meta-llama/Llama-2-7b-chat-hf  AVG = 20.7077 seconds,  6.2 tokens/sec  memory=10173.45 MB
      +* TheBloke/Llama-2-7B-Chat-GPTQ  AVG = 12.3922 seconds, 10.3 tokens/sec  memory=7023.36 MB
      +* TheBloke/Llama-2-7B-Chat-AWQ   AVG = 11.4667 seconds, 11.2 tokens/sec  memory=4662.34 MB
      +
      +

      NanoLLM

      +

      The NanoLLM library uses the optimized MLC/TVM library for inference, like on the Benchmarks page:

      +

      +
      from nano_llm import NanoLLM, ChatHistory, ChatTemplates
      +
      +# load model
      +model = NanoLLM.from_pretrained(
      +    model='meta-llama/Llama-2-7b-chat-hf', 
      +    quantization='q4f16_ft', 
      +    api='mlc'
      +)
      +
      +# create the chat history
      +chat_history = ChatHistory(model, system_prompt="You are a helpful and friendly AI assistant.")
      +
      +while True:
      +    # enter the user query from terminal
      +    print('>> ', end='', flush=True)
      +    prompt = input().strip()
      +
      +    # add user prompt and generate chat tokens/embeddings
      +    chat_history.append(role='user', msg=prompt)
      +    embedding, position = chat_history.embed_chat()
      +
      +    # generate bot reply
      +    reply = model.generate(
      +        embedding, 
      +        streaming=True, 
      +        kv_cache=chat_history.kv_cache,
      +        stop_tokens=chat_history.template.stop,
      +        max_new_tokens=256,
      +    )
      +
      +    # append the output stream to the chat history
      +    bot_reply = chat_history.append(role='bot', text='')
      +
      +    for token in reply:
      +        bot_reply.text += token
      +        print(token, end='', flush=True)
      +
      +    print('\n')
      +
      +    # save the inter-request KV cache 
      +    chat_history.kv_cache = reply.kv_cache
      +
      +

      This example keeps an interactive chat running with text being entered from the terminal. You can start it like this:

      +
      jetson-containers run $(autotag nano_llm) \
      +    python3 -m nano_llm.chat.example
      +
      +

      Or for easy editing from the host device, copy the source into your own script and mount it into the container with the --volume flag.

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_audiocraft.html b/tutorial_audiocraft.html new file mode 100644 index 00000000..a2db8620 --- /dev/null +++ b/tutorial_audiocraft.html @@ -0,0 +1,1416 @@ + + + + + + + + + + + + + + + + + + + + + + + + + AudioCraft - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - AudioCraft

      +

      Let's run Meta's AudioCraft, to produce high-quality audio and music on Jetson!

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 10.7 GB for audiocraft container image
        • +
        • Space for checkpoints
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      How to start

      +

Use the run.sh and autotag scripts to automatically pull or build a compatible container image.

      +
      jetson-containers run $(autotag audiocraft)
      +
      +

      The container has a default run command (CMD) that will automatically start the Jupyter Lab server.

      +

      Open your browser and access http://<IP_ADDRESS>:8888.

      +
      +

      The default password for Jupyter Lab is nvidia.

      +
      +

      Run Jupyter notebooks

      +

The AudioCraft repo comes with demo Jupyter notebooks.

      +

In the Jupyter Lab navigation pane on the left, double-click the demos folder.

      +

      +

      AudioGen demo

      +

      For "Text-conditional Generation", you should get something like this.

      +

      +
      +

      Info

      +

      You may encounter an error message like the following when executing the first cell, but you can keep going. +

      A matching Triton is not available, some optimizations will not be enabled.
      +Error caught was: No module named 'triton'
      +

      +
      +
      +

      Warning

      +

When running the 5th cell of audiogen_demo.ipynb, you may run into a "Failed to load audio" RuntimeError.

      +
      +

      MusicGen demo

      +

      For "Text-conditional Generation", you should get something like this.

      +

      +
      +

      Warning

      +

When running the 5th cell of musicgen_demo.ipynb, you may run into a "Failed to load audio" RuntimeError.

      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_distillation.html b/tutorial_distillation.html new file mode 100644 index 00000000..a88151eb --- /dev/null +++ b/tutorial_distillation.html @@ -0,0 +1,1249 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 📑 Knowledge Distillation - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      + +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_efficientvit.html b/tutorial_efficientvit.html new file mode 100644 index 00000000..66f5fb7e --- /dev/null +++ b/tutorial_efficientvit.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_live-llava.html b/tutorial_live-llava.html new file mode 100644 index 00000000..d06c3c7b --- /dev/null +++ b/tutorial_live-llava.html @@ -0,0 +1,1418 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Live LLaVA - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - Live LLaVA

      +
      +

      Recommended

      +

      Follow the chat-based LLaVA and NanoVLM tutorials to familiarize yourself with vision/language models and test the models first.

      +
      +

      This multimodal agent runs a vision-language model on a live camera feed or video stream, repeatedly applying the same prompts to it:

      +

      +

It uses models like LLaVA or VILA (based on Llama and CLIP) that have been quantized to 4-bit precision for deployment on Jetson Orin. It runs an optimized multimodal pipeline from the NanoLLM library, including event filters and alerts, and multimodal RAG:

      + + +

      Running the Live Llava Demo

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 22GB for nano_llm container image
        • +
        • Space for models (>10GB)
        • +
        +
      6. +
      7. +

        Follow the chat-based LLaVA and NanoVLM tutorials first.

        +
      8. +
      9. +

        Supported vision/language models:

        + +
      10. +
      +
      +

      The VideoQuery agent applies prompts to the incoming video feed with the VLM. Navigate your browser to https://<IP_ADDRESS>:8050 after launching it with your camera (Chrome is recommended with chrome://flags#enable-webrtc-hide-local-ips-with-mdns disabled)

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.agents.video_query --api=mlc \
      +    --model Efficient-Large-Model/VILA-2.7b \
      +    --max-context-len 768 \
      +    --max-new-tokens 32 \
      +    --video-input /dev/video0 \
      +    --video-output webrtc://@:8554/output
      +
      +

      +

      This uses jetson_utils for video I/O, and for options related to protocols and file formats, see Camera Streaming and Multimedia. In the example above, it captures a V4L2 USB webcam connected to the Jetson (under the device /dev/video0) and outputs a WebRTC stream.

      +
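If you're not sure which /dev/video* node corresponds to your webcam, you can list the video devices on the host before launching the container (v4l2-ctl comes from the optional v4l-utils package):

ls /dev/video*                  # show the available video device nodes
sudo apt install -y v4l-utils   # optional, provides v4l2-ctl
v4l2-ctl --list-devices         # map device nodes to camera names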

      Processing a Video File or Stream

      +

      The example above was running on a live camera, but you can also read and write a video file or network stream by substituting the path or URL to the --video-input and --video-output command-line arguments like this:

      +
      jetson-containers run \
      +  -v /path/to/your/videos:/mount
      +  $(autotag nano_llm) \
      +    python3 -m nano_llm.agents.video_query --api=mlc \
      +      --model Efficient-Large-Model/VILA-2.7b \
      +      --max-new-tokens 32 \
      +      --video-input /mount/my_video.mp4 \
      +      --video-output /mount/output.mp4 \
      +      --prompt "What does the weather look like?"
      +
      +

This example processes a pre-recorded video (in MP4, MKV, AVI, or FLV format with H.264/H.265 encoding), but it can also input/output live network streams like RTP, RTSP, and WebRTC using Jetson's hardware-accelerated video codecs.

      +

      NanoDB Integration

      +

      If you launch the VideoQuery agent with the --nanodb flag along with a path to your NanoDB database, it will perform reverse-image search on the incoming feed against the database by re-using the CLIP embeddings generated by the VLM.

      +

      To enable this mode, first follow the NanoDB tutorial to download, index, and test the database. Then launch VideoQuery like this:

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.agents.video_query --api=mlc \
      +    --model Efficient-Large-Model/VILA-2.7b \
      +    --max-context-len 768 \
      +    --max-new-tokens 32 \
      +    --video-input /dev/video0 \
      +    --video-output webrtc://@:8554/output \
      +    --nanodb /data/nanodb/coco/2017
      +
      +

      You can also tag incoming images and add them to the database using the panel in the web UI.

      +
      + +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_llamaspeak.html b/tutorial_llamaspeak.html new file mode 100644 index 00000000..fb7ddfe8 --- /dev/null +++ b/tutorial_llamaspeak.html @@ -0,0 +1,1366 @@ + + + + + + + + + + + + + + + + + + + + + + + + + llamaspeak - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - llamaspeak

      +

      Talk live with Llama using ASR/TTS, and chat about images with Llava!

      +

      +
        +
      • The NanoLLM library provides optimized inference for LLM and speech models.
      • +
      • It's recommended to run JetPack 6.0 to be able to run the latest containers.
      • +
      +

      The WebChat agent has conversational abilities and multimodal support for chatting about images with vision/language models.

      +

      Running llamaspeak

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 6 (L4T r36)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 22GB for nano_llm container image
        • +
        • Space for models (>10GB)
        • +
        +
      6. +
      7. +

        Start the Riva server first and test the ASR/TTS services.

        +
      8. +
      +
      +
      jetson-containers run --env HUGGINGFACE_TOKEN=hf_xyz123abc456 \
      +  $(autotag nano_llm) \
      +  python3 -m nano_llm.agents.web_chat --api=mlc \
      +    --model meta-llama/Llama-2-7b-chat-hf \
      +    --asr=riva --tts=riva
      +
      +

      This will start llamaspeak with text LLM and Riva for ASR/TTS. You can then navigate your browser to https://IP_ADDRESS:8050 + +

        +
      • The default port is 8050, but can be changed with --web-port (and --ws-port for the websocket port)
      • +
      • To debug issues with client/server communication, use --verbose and --web-trace to print incoming/outgoing websocket messages.
      • +
      • During bot replies, the TTS model will pause output if you speak a few words in the mic to interrupt it.
      • +
      +

      + + +

      Multimodality

      +

      If you load a multimodal vision/language model instead, you can drag images into the chat and ask questions about them:

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.agents.web_chat --api=mlc \
      +    --model Efficient-Large-Model/VILA-7b \
      +    --asr=riva --tts=riva
      +
      + + +

      For more info about the supported vision/language models, see the NanoVLM page.

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_llava.html b/tutorial_llava.html new file mode 100644 index 00000000..27226288 --- /dev/null +++ b/tutorial_llava.html @@ -0,0 +1,1684 @@ + + + + + + + + + + + + + + + + + + + + + + + + + LLaVA - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + +

      Tutorial - LLaVA

      +

LLaVA is a popular multimodal vision/language model that you can run locally on Jetson to answer questions about image prompts and queries. Llava uses the CLIP vision encoder to transform images into the same embedding space as its LLM (which uses the same architecture as Llama). Below we cover different methods to run Llava on Jetson, with increasingly optimized performance:

      +
        +
      1. Chat with Llava using text-generation-webui
      2. +
      3. Run from the terminal with llava.serve.cli
      4. +
      5. Quantized GGUF models with llama.cpp
      6. +
      7. Optimized Multimodal Pipeline with NanoVLM
      8. +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Llava-13B (Jetson AGX Orin)QuantizationTokens/secMemory
      text-generation-webui4-bit (GPTQ)2.39.7 GB
      llava.serve.cliFP16 (None)4.227.7 GB
      llama.cpp4-bit (Q4_K)10.19.2 GB
      NanoVLM4-bit (MLC)21.18.7 GB
      +

      In addition to Llava, the NanoVLM pipeline supports VILA and mini vision models that run on Orin Nano as well.

      +

      +

      1. Chat with Llava using text-generation-webui

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.2GB for text-generation-webui container image
        • +
        • Space for models
            +
          • CLIP model : 1.7GB
          • +
          • Llava-v1.5-13B-GPTQ model : 7.25GB
          • +
          +
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      Download Model

      +
      jetson-containers run --workdir=/opt/text-generation-webui $(autotag text-generation-webui) \
      +  python3 download-model.py --output=/data/models/text-generation-webui \
      +    TheBloke/llava-v1.5-13B-GPTQ
      +
      +

      Start Web UI with Multimodal Extension

      +
      jetson-containers run --workdir=/opt/text-generation-webui $(autotag text-generation-webui) \
      +  python3 server.py --listen \
      +    --model-dir /data/models/text-generation-webui \
      +    --model TheBloke_llava-v1.5-13B-GPTQ \
      +    --multimodal-pipeline llava-v1.5-13b \
      +    --loader autogptq \
      +    --disable_exllama \
      +    --verbose
      +
      +

Go to the Chat tab, drag and drop an image into the Drop Image Here area, type your question in the text area, and hit Generate:

      +

      +

      Result

      +

      +

      2. Run from the terminal with llava.serve.cli

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson:

        +

        Jetson AGX Orin 64GB +Jetson AGX Orin (32GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.1GB for llava container
        • +
        • 14GB for Llava-7B (or 26GB for Llava-13B)
        • +
        +
      6. +
      +
      +

      This example uses the upstream Llava repo to run the original, unquantized Llava models from the command-line. It uses more memory due to using FP16 precision, and is provided mostly as a reference for debugging. See the Llava container readme for more info.

      +

      llava-v1.5-7b

      +
      jetson-containers run $(autotag llava) \
      +  python3 -m llava.serve.cli \
      +    --model-path liuhaotian/llava-v1.5-7b \
      +    --image-file /data/images/hoover.jpg
      +
      +

      llava-v1.5-13b

      +
      jetson-containers run $(autotag llava) \
      +  python3 -m llava.serve.cli \
      +    --model-path liuhaotian/llava-v1.5-13b \
      +    --image-file /data/images/hoover.jpg
      +
      +
      +

      Unquantized 13B may run only on Jetson AGX Orin 64GB due to memory requirements.

      +
      + + +

      3. Quantized GGUF models with llama.cpp

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      +
      +

llama.cpp is one of the faster LLM APIs, and can apply a variety of quantization methods to Llava to reduce its memory usage and runtime. Despite its name, it uses CUDA. There are pre-quantized versions of Llava-1.5 available in GGUF format for 4-bit and 5-bit:

      + +
      jetson-containers run --workdir=/opt/llama.cpp/bin $(autotag llama_cpp:gguf) \
      +  /bin/bash -c './llava-cli \
      +    --model $(huggingface-downloader mys/ggml_llava-v1.5-13b/ggml-model-q4_k.gguf) \
      +    --mmproj $(huggingface-downloader mys/ggml_llava-v1.5-13b/mmproj-model-f16.gguf) \
      +    --n-gpu-layers 999 \
      +    --image /data/images/hoover.jpg \
      +    --prompt "What does the sign say"'
      +
      + + + + + + + + + + + + + + + + + + + + + + + + + + +
      QuantizationBitsResponseTokens/secMemory
      Q4_K4The sign says "Hoover Dam, Exit 9."10.179.2 GB
      Q5_K5The sign says "Hoover Dam exit 9."9.7310.4 GB
      +

      A lower temperature like 0.1 is recommended for better quality (--temp 0.1), and if you omit --prompt it will describe the image:

      +
      jetson-containers run --workdir=/opt/llama.cpp/bin $(autotag llama_cpp:gguf) \
      +  /bin/bash -c './llava-cli \
      +    --model $(huggingface-downloader mys/ggml_llava-v1.5-13b/ggml-model-q4_k.gguf) \
      +    --mmproj $(huggingface-downloader mys/ggml_llava-v1.5-13b/mmproj-model-f16.gguf) \
      +    --n-gpu-layers 999 \
      +    --image /data/images/lake.jpg'
      +
      +In this image, a small wooden pier extends out into a calm lake, surrounded by tall trees and mountains. The pier seems to be the only access point to the lake. The serene scene includes a few boats scattered across the water, with one near the pier and the others further away. The overall atmosphere suggests a peaceful and tranquil setting, perfect for relaxation and enjoying nature.
      +
      +

You can put your own images in the mounted jetson-containers/data directory. The C++ code for llava-cli can be found here. The llama-cpp-python bindings also support Llava; however, they are significantly slower from Python for some reason (potentially pre-processing).

      +
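For example, you could copy an image from the host into the mounted data directory (my_photo.jpg is just a placeholder name) and then reference it inside the container under /data/images/:

cp ~/Pictures/my_photo.jpg jetson-containers/data/images/
# inside the container, the file is then available as /data/images/my_photo.jpg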

      4. Optimized Multimodal Pipeline with NanoVLM

      +
      +

      What's Next

      +

      This section got too long and was moved to the NanoVLM page - check it out there for performance optimizations, mini VLMs, and live streaming!

      +
      +

      +

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_minigpt4.html b/tutorial_minigpt4.html new file mode 100644 index 00000000..d19538f9 --- /dev/null +++ b/tutorial_minigpt4.html @@ -0,0 +1,1348 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Mini-GPT4 - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - MiniGPT-4

      +

Give your locally running LLM access to vision by running MiniGPT-4 on Jetson!

      +

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        + +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      Start minigpt4 container with models

      +

      To start the MiniGPT4 container and webserver with the recommended models, run this command:

      +
      jetson-containers run $(autotag minigpt4) /bin/bash -c 'cd /opt/minigpt4.cpp/minigpt4 && python3 webui.py \
      +  $(huggingface-downloader --type=dataset maknee/minigpt4-13b-ggml/minigpt4-13B-f16.bin) \
      +  $(huggingface-downloader --type=dataset maknee/ggml-vicuna-v0-quantized/ggml-vicuna-13B-v0-q5_k.bin)'
      +
      +

      Then, open your web browser and access http://<IP_ADDRESS>:7860.

      +

      Results

      +

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_mmj.html b/tutorial_mmj.html new file mode 100644 index 00000000..1743abd0 --- /dev/null +++ b/tutorial_mmj.html @@ -0,0 +1,1514 @@ + + + + + + + + + + + + + + + + + + + + + + + + + First Steps - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + +

      First Steps

      + +

First steps with Metropolis Microservices for Jetson

      +

      NVIDIA Metropolis Microservices for Jetson simplifies the development of vision AI applications, offering a suite of customizable, cloud-native tools. Before diving into this tutorial, ensure you've filled out the Metropolis Microservices for Jetson Early Access form to gain the necessary access to launch the services. This step is crucial as it enables you to utilize all the features and capabilities discussed in this guide.

      +

Perfect for both newcomers and experts, this tutorial provides straightforward steps to kick-start your edge AI projects. Whether you're a student or an ecosystem partner working on a use case, this guide offers an accessible start for every skill level.

      +

      android2

      +

      0. Install NVIDIA Jetson Services:

      +

      Ok, let's start by installing NVIDIA Jetson Services: +

      sudo apt install nvidia-jetson-services
      +

      +

      Let's add some performance hacks that will be needed to run the demo faster and without streaming artifacts:

      +
        +
      • +

If you don't have the Orin at max performance, you can use these two commands; a reboot is needed afterwards: +

        sudo nvpmodel -m 0 
        +sudo jetson_clocks
        +

        +
      • +
      • +

After those two commands, a reboot is needed if your Jetson wasn't already in high-performance mode. +The following settings are optional, but they fine-tune your network buffers to ensure smoother streaming by optimizing how much data can be sent and received (a sketch for making them persistent follows this list): +

        sudo sysctl -w net.core.rmem_default=2129920
        +sudo sysctl -w net.core.rmem_max=10000000
        +sudo sysctl -w net.core.wmem_max=2000000
        +

        +
      • +
      +
      +
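If you want these buffer settings to survive a reboot, one way (a sketch, not required by the tutorial) is to append them to /etc/sysctl.conf and reload:

echo "net.core.rmem_default=2129920" | sudo tee -a /etc/sysctl.conf
echo "net.core.rmem_max=10000000" | sudo tee -a /etc/sysctl.conf
echo "net.core.wmem_max=2000000" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p    # apply the settings now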

      1. Download NVIDIA CLI for Jetson

      +

Download the NGC CLI for ARM64 from the NGC CLI site: +

      unzip ngccli_arm64.zip
      +chmod u+x ngc-cli/ngc
      +echo "export PATH=\"\$PATH:$(pwd)/ngc-cli\"" >> ~/.bash_profile && source ~/.bash_profile
      +ngc config set
      +
+Here it will ask for your API key and the organization name. To get those, you need to log in to NGC and generate an API key here.

      +

Paste the API key and enter the organization name you are using. You can press [Enter] to select the default values for the remaining options. After this, you should get the message:

      +
      Successfully saved NGC configuration to /home/jetson/.ngc/config
      +
      +

Then, log in with the same API key: +

      sudo docker login nvcr.io -u "\$oauthtoken" -p <NGC-API-KEY>
      +

      +

      Now launch the Redis and Ingress services, as we need them for this tutorial.

      +
      sudo systemctl start jetson-redis
      +sudo systemctl start jetson-ingress
      +
      +
      +

      2. Download and launch NVStreamer

      +

      +

First, we need to install NVStreamer, an app that streams the videos that Metropolis Microservices for Jetson (MMJ) will run AI on. Follow this NVStreamer link (in the top-left, click Download files.zip).

      +

      unzip files.zip
      +rm files.zip
      +tar -xvf nvstreamer.tar.gz
      +cd nvstreamer
      +
      +Launch it: +
      sudo docker compose -f compose_nvstreamer.yaml up -d  --force-recreate
      +

      +
      +
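To confirm the NVStreamer containers came up, you can list the running containers (optional):

sudo docker ps    # the NVStreamer containers should show a status of Up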

      3. Download AI_NVR and launch:

      +

      +

      AI NVR (NGC) Link (Top-left -> Download files.zip)

      +
      unzip files.zip
      +rm files.zip
      +tar -xvf ai_nvr.tar.gz
      +sudo cp ai_nvr/config/ai-nvr-nginx.conf /opt/nvidia/jetson/services/ingress/config/
      +cd ai_nvr
      +sudo docker compose -f compose_agx.yaml up -d --force-recreate
      +
      +

      4. Download some sample videos and upload them to NVStreamer

      +

      Download them from here.

      +

      unzip files.zip
      +
+OK, now, it is important to understand that there are two web interfaces:

      +
        +
1. The NVStreamer Dashboard, running at: http://localhost:31000
      2. +
3. The Camera Management Dashboard, running at: http://localhost:30080/vst
      4. +
      +

So, first we need to upload the file in the NVStreamer interface, which looks like this:

      +
      +

      image1

      +
      +

There, go to File Upload, and drag and drop the file into the upload area.

      +

After uploading it, go to the Dashboard option in the left menu and copy the RTSP URL of the video you just uploaded; you will need it for the Camera Management Dashboard.

      +

Now jump to the Camera Management Dashboard (http://localhost:30080/vst), which looks like this:

      +
      +

      image2

      +
      +

Go to the Camera Management option in the menu, then use the Add device manually option and paste the RTSP URL. Add the name of your video to the Name and Location text boxes so it will be displayed on top of the stream.

      +

Finally, click on the Live Streams option in the left menu, and you should be able to watch your video stream.

      +
      +

      video1

      +
      +

      5. Watch RTSP AI processed streaming from VLC

      +

Open VLC from another computer (localhost doesn't work here), and point it to your Jetson Orin's IP address (you should be on the same network, with no firewall blocking access).

      +

The easiest way to get the Jetson's IP is to launch: +

      ifconfig
      +
+And check the IP of the interface (usually wlan0, the inet address).

      +

      Then go to rtsp://[JETSON_IP]:8555/ds-test using VLC like this:

      +
      +

      video2

      +
      +

      6. Android app

      +

There is an Android app that allows you to track events and create areas of interest to monitor; you can find it on Google Play as AI NVR.

      +

      +
      +

Here is a quick walkthrough where you can see how to:

      +
        +
      • Add the IP address of the Jetson
      • +
      • Track current events
      • +
      • Add new areas of interest
      • +
      • Add tripwire to track the flux and direction of events
      • +
      +

      android2

      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_nano-llm.html b/tutorial_nano-llm.html new file mode 100644 index 00000000..f1fe8b28 --- /dev/null +++ b/tutorial_nano-llm.html @@ -0,0 +1,1349 @@ + + + + + + + + + + + + + + + + + + + + + + + + + NanoLLM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      NanoLLM - Optimized LLM Inference

      +

NanoLLM is a lightweight, high-performance library using optimized inferencing APIs for quantized LLMs, multimodality, speech services, vector databases with RAG, and web frontends. It's used to build many of the responsive, low-latency agents featured on this site.

      +

      +

      It provides similar APIs to HuggingFace, backed by highly-optimized inference libraries and quantization tools:

      +
      from nano_llm import NanoLLM
      +
      +model = NanoLLM.from_pretrained(
      +   "meta-llama/Llama-2-7b-hf",  # HuggingFace repo/model name, or path to HF model checkpoint
      +   api='mlc',                   # supported APIs are: mlc, awq, hf
      +   api_token='hf_abc123def',    # HuggingFace API key for authenticated models ($HUGGINGFACE_TOKEN)
      +   quantization='q4f16_ft'      # q4f16_ft, q4f16_1, q8f16_0 for MLC, or path to AWQ weights
      +)
      +
      +response = model.generate("Once upon a time,", max_new_tokens=128)
      +
      +for token in response:
      +   print(token, end='', flush=True)
      +
      +
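To try this snippet on a Jetson, you would typically run it inside the nano_llm container, mounting the script from the host (generate.py below is just an illustrative name, and the HuggingFace token is only needed for gated models):

jetson-containers run \
  --volume $PWD/generate.py:/opt/generate.py \
  --env HUGGINGFACE_TOKEN=hf_abc123def \
  $(autotag nano_llm) \
    python3 /opt/generate.py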

      Resources

      +

      Here's an index of the various tutorials & examples using NanoLLM on Jetson AI Lab:

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      BenchmarksBenchmarking results for LLM, SLM, VLM using MLC/TVM backend
      API ExamplesPython code examples for completion and multi-turn chat
      LlamaspeakTalk verbally with LLMs using low-latency ASR/TTS speech models
      Small LLM (SLM)Focus on language models with reduced footprint (7B params and below)
      Live LLaVARealtime live-streaming vision/language models on recurring prompts
      Nano VLMEfficient multimodal pipeline with one-shot RAG support
      +
      + +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_nano-vlm.html b/tutorial_nano-vlm.html new file mode 100644 index 00000000..bacea78e --- /dev/null +++ b/tutorial_nano-vlm.html @@ -0,0 +1,1507 @@ + + + + + + + + + + + + + + + + + + + + + + + + + NanoVLM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      NanoVLM - Efficient Multimodal Pipeline

      +

We saw in the previous LLaVA tutorial how to run vision-language models through tools like text-generation-webui and llama.cpp. In a similar vein to the SLM page on Small Language Models, here we'll explore optimizing VLMs for reduced memory usage and higher performance that reaches interactive levels (like in Live Llava). These are great for fitting on Orin Nano and increasing the framerate.

      +

      There are 3 model families currently supported: Llava, VILA, and Obsidian (mini VLM)

      +

      VLM Benchmarks

      + + +

This FPS measures the end-to-end pipeline performance for continuous streaming like with Live Llava (on a yes/no question).

      + + +
      +

      •   These models all use CLIP ViT-L/14@336px for the vision encoder.
      +•   Jetson Orin Nano 8GB runs out of memory trying to run Llava-13B.

      +
      +

      Multimodal Chat

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 6 (L4T r36)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 22GB for nano_llm container image
        • +
        • Space for models (>10GB)
        • +
        +
      6. +
      7. +

        Supported VLM models in NanoLLM:

        + +
      8. +
      +
      +

The optimized NanoLLM library uses MLC/TVM for quantization and inference, which provides the highest performance. It efficiently manages the CLIP embeddings and KV cache. You can find Python code for the chat program used in this example here.

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.chat --api=mlc \
      +    --model liuhaotian/llava-v1.6-vicuna-7b \
      +    --max-context-len 768 \
      +    --max-new-tokens 128
      +
      +

      This starts an interactive console-based chat with Llava, and on the first run the model will automatically be downloaded from HuggingFace and quantized using MLC and W4A16 precision (which can take some time). See here for command-line options.

      +

You'll end up at a >> PROMPT: in which you can enter the path or URL of an image file, followed by your question about the image. You can follow up with multiple questions about the same image. Llava does not understand multiple images in the same chat, so when changing images, first reset the chat history by entering clear or reset as the prompt. VILA supports multiple images (an area of active research).

      +

      Automated Prompts

      +

      During testing, you can specify prompts on the command-line that will run sequentially:

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.chat --api=mlc \
      +    --model liuhaotian/llava-v1.6-vicuna-7b \
      +    --max-context-len 768 \
      +    --max-new-tokens 128 \
      +    --prompt '/data/images/hoover.jpg' \
      +    --prompt 'what does the road sign say?' \
      +    --prompt 'what kind of environment is it?' \
      +    --prompt 'reset' \
      +    --prompt '/data/images/lake.jpg' \
      +    --prompt 'please describe the scene.' \
      +    --prompt 'are there any hazards to be aware of?'
      +
      +

      You can also use --prompt /data/prompts/images.json to run the test sequence, the results of which are in the table below.

      +

      Results

      + + +

      •   The model responses are with 4-bit quantization enabled, and are truncated to 128 tokens for brevity.
      +•   These chat questions and images are from /data/prompts/images.json (found in jetson-containers)

      +

      JSON

      +

When prompted, these models can also output in constrained JSON formats (which the LLaVA authors cover in their LLaVA-1.5 paper), and can be used to programmatically query information about the image:

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.chat --api=mlc \
      +    --model liuhaotian/llava-v1.5-13b \
      +    --prompt '/data/images/hoover.jpg' \
      +    --prompt 'extract any text from the image as json'
      +
      +{
      +  "sign": "Hoover Dam",
      +  "exit": "2",
      +  "distance": "1 1/2 mile"
      +}
      +
      +
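Since the reply is plain JSON text, it can be consumed directly from Python. A minimal sketch (the reply string below is just the output copied from above; in practice you would capture it from the model's response):

import json

reply = '{"sign": "Hoover Dam", "exit": "2", "distance": "1 1/2 mile"}'  # example reply from above
info = json.loads(reply)
print(info["sign"], "- exit", info["exit"], "-", info["distance"])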

      Web UI

      +

      To use this through a web browser instead, see the llamaspeak tutorial:

      +

      +

      Live Streaming

      +

      These models can also be used with the Live Llava agent for continuous streaming - just substitute the desired model name below:

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.agents.video_query --api=mlc \
      +    --model Efficient-Large-Model/VILA-2.7b \
      +    --max-context-len 768 \
      +    --max-new-tokens 32 \
      +    --video-input /dev/video0 \
      +    --video-output webrtc://@:8554/output
      +
      +

      The Live Llava tutorial shows how to enable additional features like vector database integration, image tagging, and RAG.

      +
      + +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_nanodb.html b/tutorial_nanodb.html new file mode 100644 index 00000000..305a5f0e --- /dev/null +++ b/tutorial_nanodb.html @@ -0,0 +1,1448 @@ + + + + + + + + + + + + + + + + + + + + + + + + + NanoDB - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - NanoDB

      +

Let's run NanoDB's interactive demo to see the impact of a vector database that handles multimodal data.

      +

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 16GB for container image
        • +
        • 40GB for MS COCO dataset
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      How to start

      +

      Download COCO

      +

As an example, let's use the MS COCO dataset:

      +
      cd jetson-containers
      +mkdir -p data/datasets/coco/2017
      +cd data/datasets/coco/2017
      +
      +wget http://images.cocodataset.org/zips/train2017.zip
      +wget http://images.cocodataset.org/zips/val2017.zip
      +wget http://images.cocodataset.org/zips/unlabeled2017.zip
      +
      +unzip train2017.zip
      +unzip val2017.zip
      +unzip unlabeled2017.zip
      +
      +

      Download Index

      +

You can download a pre-built NanoDB index for the COCO dataset from here:

      +
      cd jetson-containers/data
      +wget https://nvidia.box.com/shared/static/icw8qhgioyj4qsk832r4nj2p9olsxoci.gz -O nanodb_coco_2017.tar.gz
      +tar -xzvf nanodb_coco_2017.tar.gz
      +
      +

This allows you to skip the indexing process in the next step and jump straight to starting the Web UI.

      +

      Indexing Data

      +

If you didn't download the NanoDB index for COCO above, you can build the index by scanning your dataset directory:

      +
      jetson-containers run $(autotag nanodb) \
      +  python3 -m nanodb \
      +    --scan /data/datasets/coco/2017 \
      +    --path /data/nanodb/coco/2017 \
      +    --autosave --validate 
      +
      +

This will take a few hours on AGX Orin. Once the database has loaded and completed any start-up operations, it will drop down to a > prompt from which you can run search queries. You can quickly check the operation by typing a query at this prompt:

      +
      > a girl riding a horse
      +
      +* index=80110   /data/datasets/coco/2017/train2017/000000393735.jpg      similarity=0.29991915822029114
      +* index=158747  /data/datasets/coco/2017/unlabeled2017/000000189708.jpg  similarity=0.29254037141799927
      +* index=123846  /data/datasets/coco/2017/unlabeled2017/000000026239.jpg  similarity=0.292171448469162
      +* index=127338  /data/datasets/coco/2017/unlabeled2017/000000042508.jpg  similarity=0.29118549823760986
      +* index=77416   /data/datasets/coco/2017/train2017/000000380634.jpg      similarity=0.28964102268218994
      +* index=51992   /data/datasets/coco/2017/train2017/000000256290.jpg      similarity=0.28929752111434937
      +* index=228640  /data/datasets/coco/2017/unlabeled2017/000000520381.jpg  similarity=0.28642547130584717
      +* index=104819  /data/datasets/coco/2017/train2017/000000515895.jpg      similarity=0.285491943359375
      +
      +

      You can press Ctrl+C to exit. For more info about the various options available, see the NanoDB container documentation.

      +

      Interactive Web UI

      +

      Spin up the Gradio server:

      +
      jetson-containers run $(autotag nanodb) \
      +  python3 -m nanodb \
      +    --path /data/nanodb/coco/2017 \
      +    --server --port=7860
      +
      +

      Then navigate your browser to http://<IP_ADDRESS>:7860, and you can enter text search queries as well as drag/upload images:

      + +
      +

      To use the dark theme, navigate to http://<IP_ADDRESS>:7860/?__theme=dark instead

      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_nanoowl.html b/tutorial_nanoowl.html new file mode 100644 index 00000000..cd54f1d9 --- /dev/null +++ b/tutorial_nanoowl.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_nanosam.html b/tutorial_nanosam.html new file mode 100644 index 00000000..f0e8988a --- /dev/null +++ b/tutorial_nanosam.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_sam.html b/tutorial_sam.html new file mode 100644 index 00000000..84638f13 --- /dev/null +++ b/tutorial_sam.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_slm.html b/tutorial_slm.html new file mode 100644 index 00000000..21c61843 --- /dev/null +++ b/tutorial_slm.html @@ -0,0 +1,1418 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Small LLM (SLM) - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - Small Language Models (SLM)

      +

Small Language Models (SLMs) represent a growing class of language models that have <7B parameters - for example StableLM, Phi-2, and Gemma-2B. Their smaller memory footprint and faster performance make them good candidates for deploying on Jetson Orin Nano. Some are very capable, with abilities at a similar level to the larger models, having been trained on high-quality curated datasets.

      +

      +

      This tutorial shows how to run optimized SLMs with quantization using the NanoLLM library and MLC/TVM backend. You can run these models through tools like text-generation-webui and llama.cpp as well, just not as fast - and since the focus of SLMs is reduced computational and memory requirements, here we'll use the most optimized path available. Those shown below have been profiled:

      +

      SLM Benchmarks

      + + + + +
      +

•   The HuggingFace Open LLM Leaderboard is a collection of multitask benchmarks including reasoning & comprehension, math, coding, history, geography, etc.
+•   The model's memory footprint includes 4-bit weights and KV cache at full context length (factor in extra for process overhead, library code, etc.)
      +•   The Chat Model is the instruction-tuned variant for chatting with in the commands below, as opposed to the base completion model.

      +
      +

Based on user interactions, the recommended models to try are stabilityai/stablelm-zephyr-3b and princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT, as they have output quality on par with Llama-2-7B and well-optimized neural architectures. These models have also been used as the base for various fine-tunes (for example Nous-Capybara-3B-V1.9) and mini VLMs. Others may not be particularly coherent.
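These SLMs can also be loaded programmatically through the NanoLLM Python API covered on the NanoLLM page. A minimal sketch reusing the from_pretrained() and generate() calls shown there (the quantization and API arguments are illustrative and may need adjusting for your setup):

from nano_llm import NanoLLM

# load one of the recommended SLMs with MLC 4-bit quantization
model = NanoLLM.from_pretrained(
    "stabilityai/stablelm-zephyr-3b",
    api='mlc',
    quantization='q4f16_ft'
)

for token in model.generate("What are some fun places to visit in Tokyo?", max_new_tokens=128):
    print(token, end='', flush=True)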

      +

      Chatting with SLMs

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 22GB for nano_llm container image
        • +
        • Space for models (>5GB)
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      The nano_llm.chat program will automatically download and quantize models from HuggingFace like those listed in the table above:

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.chat --api=mlc \
      +    --model princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT
      +
      +
      +

      •   For models requiring authentication, use --env HUGGINGFACE_TOKEN=<YOUR-ACCESS-TOKEN>
      +•   Press Ctrl+C twice in succession to exit (once will interrupt bot output)

      +
      +

This will enter an interactive mode where you chat back and forth using the keyboard (entering reset will clear the chat history)

      +

      +

      Automated Prompts

      +

      During testing, you can specify prompts on the command-line that will run sequentially:

      +
      jetson-containers run $(autotag nano_llm) \
      +  python3 -m nano_llm.chat --api=mlc \
      +    --model stabilityai/stablelm-zephyr-3b \
      +    --max-new-tokens 512 \
      +    --prompt 'hi, how are you?' \
      +    --prompt 'whats the square root of 900?' \
+    --prompt 'can I get a recipe for french onion soup?'
      +
      +

      You can also load JSON files containing prompt sequences, like with --prompt /data/prompts/qa.json (the output of which is below)

      +

      Results

      + + +

      •   The model responses are with 4-bit quantization, and are truncated to 256 tokens for brevity.
      +•   These chat questions are from /data/prompts/qa.json (found in jetson-containers)

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_stable-diffusion-xl.html b/tutorial_stable-diffusion-xl.html new file mode 100644 index 00000000..88036b2a --- /dev/null +++ b/tutorial_stable-diffusion-xl.html @@ -0,0 +1,1397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Stable Diffusion XL - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - Stable Diffusion XL

      +

      Stable Diffusion XL is a newer ensemble pipeline consisting of a base model and refiner that results in significantly enhanced and detailed image generation capabilities. All told, SDXL 1.0 has 6.6 billion model parameters, in comparison to 0.98 billion for the original SD 1.5 model.

      +

      a robot searching a database for images, nvidia green colors

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices (SDXL requires >= ~13GB memory)

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.8GB for container image
        • +
        • 12.4GB for SDXL models
        • +
        +
      6. +
      7. +

        Have followed the previous stable-diffusion-webui tutorial and have the webserver container running.

        +
      8. +
      +
      +

      Downloading SDXL Models

      +

      Stable Diffusion XL is supported through AUTOMATIC1111's stable-diffusion-webui with some additional settings. First you need to download the SDXL models to your jetson-containers data directory (which is automatically mounted into the container)

      +
      # run these outside of container, and replace CONTAINERS_DIR with the path to the jetson-containers repo on your device
      +CONTAINERS_DIR=/path/to/your/jetson-containers
      +MODEL_DIR=$CONTAINERS_DIR/data/models/stable-diffusion/models/Stable-diffusion/
      +
      +sudo chown -R $USER $MODEL_DIR
      +
      +wget -P $MODEL_DIR https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors
      +wget -P $MODEL_DIR https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors
      +
      +
      +

      It's assumed that you already have the stable-diffusion-webui container and webserver running from the previous tutorial.

      +
      +

      SDXL Settings

      +

      After the models have finished downloading, click the 🔄 button to refresh the model list in the web UI. Select sd_xl_base_1.0.safetensors from the Stable Diffusion checkpoint drop-down:

      +

      +

      Then under the Generation tab, expand the Refiner section, and select sd_xl_refiner_1.0.safetensors from the drop-down:

      +

      +

      Guidance on other relevant settings:

      +
        +
      • Change the width/height to 1024x1024. SDXL works best at higher resolutions, and using 512x512 often results in more simplistic/cartoonish content. Changing image resolutions impacts the actual scene contents of the image, not just the details.
      • +
      • The refiner's Switch at setting dictates the step at which the refiner takes over from the base model. At this point, additional subject content will no longer be added to the scene, but rather its details further refined in the image.
      • +
      • Typical Sampling steps are between 20-40 and Switch at is between 0.7-0.9. This takes experimentation to find the best combination for the characteristics of your desired output.
      • +
• Extensive negative prompts are not as necessary in the same way as they were for SD 1.5 (e.g. low quality, jpeg artifacts, blurry, etc.)
      • +
      • CFG Scale controls how closely the model conforms to your prompt versus how creative it is.
      • +
      +

      When you get a good image, remember to save your random seed and settings so you can re-generate it later!

      +

      Results

      +

      +
      +


      +photograph of a friendly robot alongside a person climbing a mountain (seed 1576166644, steps 25, switch @ 0.8, cfg scale 15)

      +
      +
      +


      +a girl and a boy building a friendly robot in their basement workshop (seed 642273464, steps 25, switch @ 0.9, cfg scale 7)

      +
      +
      +


      +small friendly robots playing games with people, in a futuristic Tokyo central park gardens with cherry blossoms and water, coy fish swimming in the water, sunshine (seed 642273464, steps 40, switch @ 0.8, cfg scale 7)

      +
      +
      +


      +small friendly robots playing games with people in a futuristic New York City Central Park in autumn, water (seed 642273464, steps 25, switch @ 0.8, cfg scale 7)

      +
      +
      +

      Want to explore using Python APIs to run diffusion models directly? See jetson-containers/stable-diffusion.
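For reference, the base + refiner hand-off described above looks roughly like this with the Hugging Face diffusers library (a minimal sketch, separate from the webui workflow; the pipeline classes, denoising split, and fp16 settings are standard diffusers usage rather than anything specific to this tutorial, and the prompt/seed are taken from the first example image above):

import torch
from diffusers import DiffusionPipeline

base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")

prompt = "photograph of a friendly robot alongside a person climbing a mountain"
generator = torch.Generator("cuda").manual_seed(1576166644)  # save the seed so the image can be re-generated

# the base model handles the first ~80% of the denoising steps ("Switch at" 0.8 in the web UI)
latents = base(prompt=prompt, num_inference_steps=25, denoising_end=0.8,
               output_type="latent", generator=generator).images
# the refiner takes over for the remaining steps to polish the details
image = refiner(prompt=prompt, num_inference_steps=25, denoising_start=0.8,
                image=latents, generator=generator).images[0]
image.save("sdxl_output.png")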

      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_stable-diffusion.html b/tutorial_stable-diffusion.html new file mode 100644 index 00000000..c28a2407 --- /dev/null +++ b/tutorial_stable-diffusion.html @@ -0,0 +1,1393 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Stable Diffusion - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - Stable Diffusion

      +

      Let's run AUTOMATIC1111's stable-diffusion-webui on NVIDIA Jetson to generate images from our prompts!

      +

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.8GB for container image
        • +
        • 4.1GB for SD 1.5 model
        • +
        +
      6. +
      +
      +

      Setup a container for stable-diffusion-webui

      +

      The jetson-containers project provides pre-built Docker images for stable-diffusion-webui. You can clone the repo to use its utilities that will automatically pull/start the correct container for you, or you can do it manually.

      +
      git clone https://github.com/dusty-nv/jetson-containers
      +bash jetson-containers/install.sh
      +
      +
      +

      Info

      +

      JetsonHacks provides an informative walkthrough video on jetson-containers, showcasing the usage of both the stable-diffusion-webui and text-generation-webui. You can find the complete article with detailed instructions here.

      +

      +
      +

      How to start

      +

      Use jetson-containers run and autotag tools to automatically pull or build a compatible container image:

      +
      jetson-containers run $(autotag stable-diffusion-webui)
      +
      +

      The container has a default run command (CMD) that will automatically start the webserver like this:

      +
      cd /opt/stable-diffusion-webui && python3 launch.py \
      +  --data=/data/models/stable-diffusion \
      +  --enable-insecure-extension-access \
      +  --xformers \
      +  --listen \
      +  --port=7860
      +
      +

      You should see it downloading the model checkpoint on the first run.

      +

      Open your browser and access http://<IP_ADDRESS>:7860

      +

      Results / Output Examples

      +

      +

      +

      Stable Diffusion XL

      +

      To generate even higher-quality and detailed images, check out the next part of the tutorial that uses the latest Stable Diffusion XL models!

      +
      +

      Want to explore using Python APIs to run diffusion models directly? See jetson-containers/stable-diffusion.
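As a taste of what that looks like, here is a minimal diffusers sketch for SD 1.5, separate from the webui flow above (the model name, step count, and fp16 settings are common defaults rather than anything prescribed by this tutorial):

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

image = pipe("a photograph of an astronaut riding a horse",
             num_inference_steps=25).images[0]
image.save("astronaut.png")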

      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_tam.html b/tutorial_tam.html new file mode 100644 index 00000000..0aa2157c --- /dev/null +++ b/tutorial_tam.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/tutorial_text-generation.html b/tutorial_text-generation.html new file mode 100644 index 00000000..302da54a --- /dev/null +++ b/tutorial_text-generation.html @@ -0,0 +1,1605 @@ + + + + + + + + + + + + + + + + + + + + + + + + + text-generation-webui - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + + + + + +
      +
      + + + + +

      Tutorial - text-generation-webui

      +

Interact with a local AI assistant by running an LLM with oobabooga's text-generation-webui on NVIDIA Jetson!

      +

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️1

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.2GB for container image
        • +
        • Spaces for models
        • +
        +
      6. +
      +
      +

      Set up a container for text-generation-webui

      +

The jetson-containers project provides pre-built Docker images for text-generation-webui along with all of the loader APIs built with CUDA enabled (llama.cpp, ExLlama, AutoGPTQ, Transformers, etc.). You can clone the repo to use its utilities that will automatically pull/start the correct container for you, or you can do it manually.

      +
      git clone https://github.com/dusty-nv/jetson-containers
      +bash jetson-containers/install.sh
      +
      +
      +

      Info

      +

      JetsonHacks provides an informative walkthrough video on jetson-containers, showcasing the usage of both the stable-diffusion-webui and text-generation-webui. You can find the complete article with detailed instructions here.

      +

      +
      +

      How to start

      +

      Use jetson-containers run and autotag tools to automatically pull or build a compatible container image:

      +
      jetson-containers run $(autotag text-generation-webui)
      +
      +

      The container has a default run command (CMD) that will automatically start the webserver like this:

      +
      cd /opt/text-generation-webui && python3 server.py \
      +  --model-dir=/data/models/text-generation-webui \
      +  --chat \
      +  --listen
      +
      +

      Open your browser and access http://<IP_ADDRESS>:7860.

      +

      Download a model on web UI

      +

See the oobabooga documentation for instructions on downloading models - either from within the web UI, or using download-model.py

      +
      jetson-containers run --workdir=/opt/text-generation-webui $(./autotag text-generation-webui) /bin/bash -c \
      +  'python3 download-model.py --output=/data/models/text-generation-webui TheBloke/Llama-2-7b-Chat-GPTQ'
      +
      +

From within the web UI, select the Model tab and navigate to the "Download model or LoRA" section.

      +

      You can find text generation models on Hugging Face Hub, then enter the Hugging Face username/model path (which you can have copied to your clipboard from the Hub). Then click the Download button.

      +

      GGUF models

      +

      The fastest oobabooga model loader to use is currently llama.cpp with 4-bit quantized GGUF models.

      +

You can download a single model file for a particular quantization, like *.Q4_K_M.gguf. Input the file name and hit the Download button.

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Model | Quantization | Memory (MB)
TheBloke/Llama-2-7b-Chat-GGUF | llama-2-7b-chat.Q4_K_M.gguf | 5,268
TheBloke/Llama-2-13B-chat-GGUF | llama-2-13b-chat.Q4_K_M.gguf | 8,609
TheBloke/LLaMA-30b-GGUF | llama-30b.Q4_K_S.gguf | 19,045
TheBloke/Llama-2-70B-chat-GGUF | llama-2-70b-chat.Q4_K_M.gguf | 37,655
      +
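If you'd rather fetch a single GGUF file from the command line than through the web UI, a minimal huggingface_hub sketch looks like this (run it anywhere the huggingface_hub package is installed; the target directory matches the model dir used by the container above):

from huggingface_hub import hf_hub_download

# download just the 4-bit Q4_K_M file from the table above into the webui's model directory
hf_hub_download(
    repo_id="TheBloke/Llama-2-7b-Chat-GGUF",
    filename="llama-2-7b-chat.Q4_K_M.gguf",
    local_dir="jetson-containers/data/models/text-generation-webui"
)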

      +
      +

      Info

      +

      Model selection for Jetson Orin Nano

      +

Jetson Orin Nano Developer Kit has only 8GB RAM for both CPU (system) and GPU, so you need to pick a model that fits in the RAM size - see the Model Size section below. The 7B models with 4-bit quantization are the ones to use on Jetson Orin Nano. Make sure you go through the RAM optimization steps before attempting to load such a model on Jetson Orin Nano.

      +
      +

      Load a model

      +

      After you have downloaded a model, click the 🔄 button to refresh your model list, and select the model you want to use.

      +

      For a GGUF model, remember to

      +
        +
      • Set n-gpu-layers to 128
      • +
• Set n_gqa to 8 if you are using Llama-2-70B (on Jetson AGX Orin 64GB)
      • +
      +

      Then click the Load button.

      +

      Chat Template

      +

If you're using a Llama model fine-tuned for chat, like the models listed above (except for LLaMA-30b), you need to use the oobabooga Instruct mode and set the template. On the Parameters tab, go to the Instruction Template sub-tab, then select Llama-v2 from the Instruction Template drop-down (or Vicuna, Guanaco, etc. if you are using one of those models)

      +
      +

      For the base text completion models (like LLaMA-30b), use the Default or Notebook tab.

      +
      +

      Selecting the right chat template will make sure the model is being prompted correctly - you can also change the system prompt in the Context box to alter the agent's personality and behavior. There are a lot of other settings under the Generation tab, like the maximum length it should output per reply, and token sampling parameters like temperature and top_p for controlling randomness.

      +

      Then change back to the Chat tab, and under the mode section, make sure Instruct is selected (confusingly, not chat mode). Then you can start chatting with the LLM!

      +

      Results

      +

      +

      Things to do with your LLM

      +

Here are some common test prompts for coding, math, history, etc. You can also ask it about geography, travel, nature, recipes, fixing things, general life advice, and practically everything else. Also, Llama-2 is quite playful and likes to play games to test its logic abilities!

      +
      >> What games do you like to play?
      +
      +I'm a large language model, so I can play text-based games and answer questions on a wide variety of topics. Here are some game ideas:
      +
      +- 20 Questions:  You or I can think of an object, and the other player tries to guess what it is by asking up to 20 yes-or-no questions.  The goal is to guess the object in as few questions as possible.
      +- Hangman: I can think of a word or phrase and you try to guess it by suggesting letters. Each incorrect guess will bring the hangman one step closer to being complete!
      +- Word association: I'll give you a word and you have to come up with a word that is related to it. We can keep playing back and forth as long as you'd like!
      +- Trivia: I can provide questions on a wide range of topics, from history to science to pop culture. See how many questions you can answer correctly!
      +- Storytelling: I can start telling a story and then stop at a cliffhanger. You can then try to guess what happens next or even take over the storytelling and continue it in your own
      +- Jokes: I love to tell jokes and make people laugh with my "Dad humor"!  Knock knock!  *giggles*
      +
      +

      Model size tested

      +

      With llama.cpp, GGUF model, 4-bit quantization:

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Model size | Jetson AGX Orin 64GB | Jetson AGX Orin 32GB | Jetson Orin Nano 8GB
      70B model
      30B model
      13B model
      7B model
      +
      +

      Want to explore using Python APIs to run LLMs directly?
      +See jetson-containers for its LLM related packages and containers.

      +
      +
      +
      +
        +
      1. +

        Limited to 7B model (4-bit quantized). 

        +
      2. +
      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tutorial_whisper.html b/tutorial_whisper.html new file mode 100644 index 00000000..f1724d35 --- /dev/null +++ b/tutorial_whisper.html @@ -0,0 +1,1429 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Whisper - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - Whisper

      +

Let's run OpenAI's Whisper, a pre-trained model for automatic speech recognition, on Jetson!

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.1 GB for whisper container image
        • +
        • Space for checkpoints
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      How to start

      +

Use the jetson-containers run and autotag tools to automatically pull or build a compatible container image.

      +
      jetson-containers run $(autotag whisper)
      +
      +

      The container has a default run command (CMD) that will automatically start the Jupyter Lab server, with SSL enabled.

      +

      Open your browser and access https://<IP_ADDRESS>:8888.

      +
      +

      Attention

      +

      Note it is https (not http).

      +

      HTTPS (SSL) connection is needed to allow ipywebrtc widget to have access to your microphone (for record-and-transcribe.ipynb).

      +
      +

      You will see a warning message like this.

      +

      +

Press the "Advanced" button and then click the "Proceed to (unsafe)" link to proceed to the Jupyter Lab web interface.

      +

      +
      +

      The default password for Jupyter Lab is nvidia.

      +
      +

      Run Jupyter notebooks

      +

The Whisper repo comes with demo Jupyter notebooks, which you can find under the /notebooks/ directory.

      +

jetson-containers also adds a convenient notebook (record-and-transcribe.ipynb) for recording your own audio sample in Jupyter and then running transcription on the recording.

      +

      +

      record-and-transcribe.ipynb

      +

This notebook lets you record your own audio sample using your PC's microphone and applies Whisper's medium model to transcribe the audio sample.

      +

      It uses Jupyter notebook/lab's ipywebrtc extension to record an audio sample on your web browser.
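If you want to script the same transcription step outside the notebook, the upstream whisper Python API only takes a few lines (a minimal sketch assuming an existing recording; the recorded.wav filename is illustrative):

import whisper

model = whisper.load_model("medium")       # same model size the notebook uses
result = model.transcribe("recorded.wav")  # path to your recorded audio sample
print(result["text"])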

      +

      +
      +

      Attention

      +

When you click the ⏺ button, your web browser may show a pop-up asking you to allow it to use your microphone. Be sure to allow access.

      +

      +
      +Final check +

      Once done, if you click on the "⚠ Not secure" part in the URL bar, you should see something like this.

      +

      +
      +
      +

      Result

      +

Once you go through all the steps, you should see the transcription result as text like this.

      +

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/index.html b/vit/index.html new file mode 100644 index 00000000..20e56486 --- /dev/null +++ b/vit/index.html @@ -0,0 +1,1291 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Vision Transformers - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Vision Transformers

      +

You can quickly run the following Vision Transformer models and applications on Jetson by using jetson-containers.

      +

      Efficient ViT

      +

      +

      NanoSAM

      +

      +

      NanoOWL

      +

      +

      SAM

      +

      +

      TAM

      +

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_efficientvit.html b/vit/tutorial_efficientvit.html new file mode 100644 index 00000000..fdf6a37f --- /dev/null +++ b/vit/tutorial_efficientvit.html @@ -0,0 +1,1439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + EfficientViT - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - EfficientViT

      +

      Let's run MIT Han Lab's EfficientViT on Jetson!

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 10.9 GB for efficientvit container image
        • +
        • Space for checkpoints
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      How to start

      +

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      +
      jetson-containers run $(autotag efficientvit)
      +
      +

      Usage of EfficientViT

      +

      The official EfficientViT repo shows the complete usage information: https://github.com/mit-han-lab/efficientvit#usage

      +

      Run example/benchmark

      +

Inside the container, a small benchmark script benchmark.py is added under the /opt/efficientvit directory by the jetson-containers build process.

      +

It tests EfficientViT-L2-SAM in bounding-box mode, so we can use it as an example and verify the output.

      +

      Download l2.pt model

      +
      mkdir -p /data/models/efficientvit/sam/
      +cd /data/models/efficientvit/sam/
      +wget https://huggingface.co/han-cai/efficientvit-sam/resolve/main/l2.pt
      +
      +
      +

The downloaded checkpoint file is stored in the /data/ directory, which is mounted from the Docker host.

      +
      +

      Run benchmark script

      +
      cd /opt/efficientvit
      +python3 ./benchmark.py
      +
      +

      At the end you should see a summary like the following.

      +
      AVERAGE of 2 runs:
      +  encoder --- 0.062 sec
      +  latency --- 0.083 sec
      +Memory consumption :  3419.68 MB
      +
      +

      Check the output/result

      +

      The output image file (of the last inference result) is stored as /data/benchmarks/efficientvit_sam_demo.png.

      +

It is stored under the /data/ directory, which is mounted from the Docker host.
+So you can go back to your host machine and check the jetson-containers/data/benchmarks/ directory.

      +

      You should find the output like this.

      +

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_nanoowl.html b/vit/tutorial_nanoowl.html new file mode 100644 index 00000000..82b8a3a3 --- /dev/null +++ b/vit/tutorial_nanoowl.html @@ -0,0 +1,1405 @@ + + + + + + + + + + + + + + + + + + + + + + + + + NanoOWL - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - NanoOWL

      +

      Let's run NanoOWL, OWL-ViT optimized to run real-time on Jetson with NVIDIA TensorRT.

      +

      +
      +

      What you need

      +
        +
      1. +

One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 7.2 GB for container image
        • +
        • Spaces for models
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      How to start

      +

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      +
      jetson-containers run $(autotag nanoowl)
      +
      +

      How to run the tree prediction (live camera) example

      +
        +
      1. +

        Ensure you have a camera device connected

        +
        ls /dev/video*
        +
        +

        If no video device is found, exit from the container and check if you can see a video device on the host side.

        +
      2. +
      3. +

        Launch the demo +

        cd examples/tree_demo
        +python3 tree_demo.py ../../data/owl_image_encoder_patch32.engine
        +

        +
        +

        Info

        +

        If it fails to find or load the TensorRT engine file, build the TensorRT engine for the OWL-ViT vision encoder on your Jetson device.

        +
        python3 -m nanoowl.build_image_encoder_engine \
        +    data/owl_image_encoder_patch32.engine
        +
        +
        +
      4. +
      5. +

Open your browser to http://<ip address>:7860

        +
      6. +
      7. +

        Type whatever prompt you like to see what works!

        +

        Here are some examples

        +
          +
        • Example: [a face [a nose, an eye, a mouth]]
        • +
        • Example: [a face (interested, yawning / bored)]
        • +
        • Example: (indoors, outdoors)
        • +
        +
      8. +
      +

      Result

      +

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_nanosam.html b/vit/tutorial_nanosam.html new file mode 100644 index 00000000..d4bcd1ec --- /dev/null +++ b/vit/tutorial_nanosam.html @@ -0,0 +1,1373 @@ + + + + + + + + + + + + + + + + + + + + + + + + + NanoSAM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - NanoSAM

      +

Let's run NVIDIA's NanoSAM to check out the performance gains from distillation.

      +

      +
      +

      What you need

      +
        +
      1. +

One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.3GB for container image
        • +
        • Spaces for models
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      How to start

      +

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      +
      jetson-containers run $(autotag nanosam)
      +
      +

      Run examples

      +

Inside the container, you can move to the /opt/nanosam directory to go through all the examples demonstrated in the repo.

      +
      cd /opt/nanosam
      +
      +

      To run the "Example 1 - Segment with bounding box":

      +
      python3 examples/basic_usage.py \
      +    --image_encoder="data/resnet18_image_encoder.engine" \
      +    --mask_decoder="data/mobile_sam_mask_decoder.engine"
      +
      +

      The result is saved under /opt/nanosam/data/basic_usage_out.jpg.

      +

To check it on your host machine, you can copy it into the /data directory of the container, which is mounted from the host.

      +
      cp data/basic_usage_out.jpg /data/
      +
      +

      Then you can go to your host system, and find the file under jetson-containers/data/basic_usage_out.jpg
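To call NanoSAM from your own Python code instead of the bundled example, the repo exposes a Predictor that wraps the two TensorRT engines. A rough sketch based on the upstream NanoSAM README (module path and argument names may differ between versions, so treat this as an outline rather than a verified API):

import numpy as np
import PIL.Image
from nanosam.utils.predictor import Predictor  # module path per the upstream README (assumption)

# same engines that were passed to basic_usage.py above
predictor = Predictor(
    "data/resnet18_image_encoder.engine",
    "data/mobile_sam_mask_decoder.engine"
)

predictor.set_image(PIL.Image.open("/data/images/hoover.jpg"))  # any test image mounted into the container

# prompt with a single foreground point (x, y); label 1 = foreground
points = np.array([[320, 240]])
point_labels = np.array([1])
mask, _, _ = predictor.predict(points, point_labels)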

      +

      Results

      +

      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_sam.html b/vit/tutorial_sam.html new file mode 100644 index 00000000..04b6cd2e --- /dev/null +++ b/vit/tutorial_sam.html @@ -0,0 +1,1378 @@ + + + + + + + + + + + + + + + + + + + + + + + + + SAM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

      Tutorial - SAM (Segment Anything)

      +

      Let's run Meta's SAM on NVIDIA Jetson.

      +

      +
      +

      What you need

      +
        +
      1. +

        One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB) +Jetson Orin NX (16GB) +Jetson Orin Nano (8GB)⚠️1

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x) +JetPack 6 (L4T r36.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.8GB for container image
        • +
        • Spaces for models
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      How to start

      +

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      +
      jetson-containers run $(autotag sam)
      +
      +

      The container has a default run command (CMD) that will automatically start the Jupyter Lab server.

      +

      Open your browser and access http://<IP_ADDRESS>:8888.

      +
      +

      The default password for Jupyter Lab is nvidia.

      +
      +

      Run Jupyter notebook

      +

In Jupyter Lab, navigate to notebooks and open the automatic_mask_generator_example notebook.

      +

      Create a new cell at the top, insert the model download command below and run the cell.

      +
      !wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
      +
      +

      Then go through executing all the cells below Set-up.
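The notebook boils down to a few calls from the segment_anything package; here is a condensed sketch of the same flow (the image path is illustrative, and the checkpoint is the sam_vit_h_4b8939.pth file downloaded above - on Orin Nano, substitute the smaller vit_l checkpoint per the footnote below):

import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
sam.to(device="cuda")

mask_generator = SamAutomaticMaskGenerator(sam)

# SAM expects an HxWx3 uint8 RGB array
image = cv2.cvtColor(cv2.imread("images/dog.jpg"), cv2.COLOR_BGR2RGB)
masks = mask_generator.generate(image)
print(len(masks), "masks generated; keys per mask:", list(masks[0].keys()))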

      +

      Results

      +

      +
      +
      +
        +
      1. +

The biggest vit_h (2.4GB) model may not run due to OOM, but vit_l (1.1GB) runs on Jetson Orin Nano.

        +
      2. +
      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vit/tutorial_tam.html b/vit/tutorial_tam.html new file mode 100644 index 00000000..0c2e0d0c --- /dev/null +++ b/vit/tutorial_tam.html @@ -0,0 +1,1430 @@ + + + + + + + + + + + + + + + + + + + + + + + + + TAM - NVIDIA Jetson AI Lab + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + + + + + + +
      + +
      + + + + +
      +
      + + + +
      +
      +
      + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + +

Tutorial - TAM (Track Anything)

      +

      Let's run TAM to perform Segment Anything on videos on NVIDIA Jetson.

      +

      +
      +

      What you need

      +
        +
      1. +

One of the following Jetson devices:

        +

        Jetson AGX Orin (64GB) +Jetson AGX Orin (32GB)

        +
      2. +
      3. +

        Running one of the following versions of JetPack:

        +

        JetPack 5 (L4T r35.x)

        +
      4. +
      5. +

        Sufficient storage space (preferably with NVMe SSD).

        +
          +
        • 6.8GB for container image
        • +
        • Spaces for models
        • +
        +
      6. +
      7. +

        Clone and setup jetson-containers:

        +
        git clone https://github.com/dusty-nv/jetson-containers
        +bash jetson-containers/install.sh
        +
        +
      8. +
      +
      +

      How to start

      +

      Use the jetson-containers run and autotag commands to automatically pull or build a compatible container image.

      +
      jetson-containers run $(autotag tam)
      +
      +

      The container has a default run command (CMD) that will automatically start TAM's web server.

      +

      Open your browser and access http://<IP_ADDRESS>:12212.

      +

      TAM web UI

      +

      Check out the official tutorial to learn how to operate the web UI.

      + + +

      Results

      + + +

      Troubleshooting

      +

      FileNotFoundError: [Errno 2] No such file or directory: './checkpoints/E2FGVI-HQ-CVPR22.pth'

      +

      You may find the TAM app fails to download a checkpoint file E2FGVI-HQ-CVPR22.pth.

      +
      Downloading checkpoints from Google Drive... tips: If you cannot see the progress bar, please try to download it manuall               and put it in the checkpointes directory. E2FGVI-HQ-CVPR22.pth: https://github.com/MCG-NKU/E2FGVI(E2FGVI-HQ model)
      +Access denied with the following error:
      +
      +        Cannot retrieve the public link of the file. You may need to change
      +        the permission to 'Anyone with the link', or have had many accesses. 
      +
      +You may still be able to access the file from the browser:
      +
      +         https://drive.google.com/uc?id=10wGdKSUOie0XmCr8SQ2A2FeDe-mfn5w3 
      +
      +

      You can manually download the checkpoint file on your Docker host machine.

      +
      cd jetson-containers/
      +pip install gdown
      +source ~/.profile
      +gdown https://drive.google.com/uc?id=10wGdKSUOie0XmCr8SQ2A2FeDe-mfn5w3 
      +mv E2FGVI-HQ-CVPR22.pth ./data/models/tam/
      +
      +

Then you can try running the TAM container again.

      +
      jetson-containers run $(autotag tam)
      +
      + + + + + + + + +
      +
      + + + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + + + + + \ No newline at end of file