add page to test generators/deployment endpoints #2

Open · wants to merge 9 commits into master
15 changes: 12 additions & 3 deletions constants/endpoints-constants.js
@@ -1,5 +1,14 @@
 export const appUrl = `https://local.webaverse.com`;
-export const weaviateUrl = `https://weaviate.weabaverse.com`;
-export const stableDiffusionUrl = `https://stable-diffusion.webaverse.com`;
+export const gpt3Url = `https://gpt3.webaverse.com`; // create
 export const voiceUrl = `https://voice-cw.webaverse.com`;
-export const diffsoundUrl = `https://diffsound.webaverse.com`;
+export const diffsoundUrl = `https://diffsound.webaverse.com`;
+export const motionDiffusionUrl = `https://motion-diffusion.webaverse.com`; // create
+export const stableDreamfusionUrl = `https://stable-dreamfusion.webaverse.com`; // create
+export const get3dUrl = `https://get-3d.webaverse.com`; // create
+export const musicGeneratorUrl = `https://music-generator.webaverse.com`; // create
+export const discoDiffusionUrl = `https://disco-diffusion.webaverse.com`; // create
+// coreweave
+export const pixelArtUrl = `https://pixel-art.webaverse.com`;
+export const weaviateUrl = `https://weaviate.webaverse.com`;
+export const stableDiffusionUrl = `https://stable-diffusion.webaverse.com`;
+export const blipUrl = `https://blip.webaverse.com`;
82 changes: 82 additions & 0 deletions generators/generator.js
@@ -0,0 +1,82 @@
import fetch from 'node-fetch'
import {
voiceUrl,
stableDiffusionUrl,
diffsoundUrl,
pixelArtUrl,
blipUrl,
} from '../constants/endpoints-constants'

// Returns the TTS endpoint URL for the given transcript; callers fetch it themselves.
export const generateVoice = () => async ({s, voice} = {}) => {
  return `${voiceUrl}/tts?s=${encodeURIComponent(s)}&voice=${encodeURIComponent(voice)}`
}
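
// Example usage (illustrative transcript and hypothetical voice id):
//   const ttsUrl = await generateVoice()({s: 'hello there', voice: 'test-voice'})
//   const audio = await (await fetch(ttsUrl)).arrayBuffer()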

export const generateImage = ({
modelName,
prefix,
}) => async ({
name,
description,
} = {}) => {
const s = `${prefix} ${description}`
const u = `${stableDiffusionUrl}/image?s=${encodeURIComponent(s)}&model=${modelName}`
const res = await fetch(u)
if (res.ok) {
const arrayBuffer = await res.arrayBuffer()
if (arrayBuffer.byteLength > 0) {
return arrayBuffer
} else {
throw new Error(`generated empty image`)
}
} else {
throw new Error(`invalid status: ${res.status}`)
}
}
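
// Example usage (illustrative prompt; a null modelName is assumed to fall back
// to the service's default model):
//   const makeImage = generateImage({modelName: null, prefix: 'a painting of'})
//   const imageBuffer = await makeImage({description: 'snowy mountains'})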

// Returns the Diffsound endpoint URL for the given text; callers fetch it themselves.
export const generateDiffSound = () => async ({s} = {}) => {
  return `${diffsoundUrl}/sound?s=${encodeURIComponent(s)}`
}

export const generatePixelArt = () => async () => {
  const delay = ms => new Promise(res => setTimeout(res, ms))
  // kick off the generation job and grab its id
  const generate = `${pixelArtUrl}/generate?steps=25&seed=30&s=${encodeURIComponent('snowy mountains')}`
  const {id: queryId} = await (await fetch(generate)).json()
  // the job is queued server-side, so wait before fetching the result
  await delay(50000)
  const pixelArt = `${pixelArtUrl}/generate_result?query_id=${queryId}`
  const res = await fetch(pixelArt)
  if (res.ok) {
    const arrayBuffer = await res.arrayBuffer()
    if (arrayBuffer.byteLength > 0) {
      return arrayBuffer
    } else {
      throw new Error(`generated empty image`)
    }
  } else {
    throw new Error(`invalid status: ${res.status}`)
  }
}
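
// Example usage; note the single fixed 50s wait between /generate and
// /generate_result (a production client would likely poll instead):
//   const pixelArtBuffer = await generatePixelArt()()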

export const generateBlipResult = () => async ({s} = {}) => {
  const u = `${blipUrl}/upload/url?task=image_captioning&img_url=${encodeURIComponent(s)}`
  const res = await fetch(u)
  if (!res.ok) {
    throw new Error(`invalid status: ${res.status}`)
  }
  // resolve with the parsed caption response so the caller can render it
  return await res.json()
}
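
A minimal sketch of driving these generators from a plain Node script (the transcript, voice id, and prompts are placeholder values; assumes an ES module so top-level await works):

import fetch from 'node-fetch'
import {generateVoice, generateImage} from './generators/generator'

// TTS: generateVoice resolves with a URL; fetch it to get the audio bytes
const ttsUrl = await generateVoice()({s: 'hello there', voice: 'test-voice'})
const audio = await (await fetch(ttsUrl)).arrayBuffer()

// Image: generateImage fetches internally and resolves with the image ArrayBuffer
const makeImage = generateImage({modelName: null, prefix: 'a painting of'})
const image = await makeImage({description: 'snowy mountains'})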
292 changes: 292 additions & 0 deletions pages/generators.js
@@ -0,0 +1,292 @@
import Head from 'next/head'
import {useState} from 'react'
import styles from '../styles/Home.module.css'
import {
generateVoice,
generateImage,
generateDiffSound,
generatePixelArt,
generateBlipResult,
} from '../generators/generator'

// import Reader from 'riff-wave-reader/lib/reader'

export default function Generators() {
const [loadingVoice, setLoadingVoice] = useState(false)
const [generatedVoice, setGeneratedVoice] = useState(null)
const [transcript, setTranscript] = useState('')
const [voice, setVoice] = useState('')

const [loadingImage, setLoadingImage] = useState(false)
const [generatedImage, setGeneratedImage] = useState(null)

const [loadingSound, setLoadingSound] = useState(false)
const [generatedSound, setGeneratedSound] = useState(null)
const [sound, setSound] = useState('')

const [loadingPixelArt, setLoadingPixelArt] = useState(false)
const [generatedPixelArt, setGeneratedPixelArt] = useState(null)

const [loadingBlip, setLoadingBlip] = useState(false)
const [generatedBlip, setGeneratedBlip] = useState('')
const [blipImageUrl, setBlipImageUrl] = useState('')

// generateVoice
const handleTranscript = e => {
setTranscript(e.target.value)
}
const handleVoice = e => {
setVoice(e.target.value)
}
async function generateTestVoice() {
  const newVoice = generateVoice()
  // generateVoice resolves with the TTS endpoint URL, not the audio itself
  const ttsUrl = await newVoice({s: transcript, voice})
  const res = await fetch(ttsUrl)
  const blob = new Blob([await res.arrayBuffer()])
  const audioFromBlob = URL.createObjectURL(blob)
  setGeneratedVoice(audioFromBlob)
  setLoadingVoice(false)
}

// generateImage
async function generateTestImage() {
  setLoadingImage(true)
  // generateImage takes {modelName, prefix} for the factory and
  // {description} for the inner call, not a bare string
  const makeImage = generateImage({
    modelName: null,
    prefix: 'test',
  })
  const imgArrayBuffer = await makeImage({description: 'test generate image'})

  const blob = new Blob([imgArrayBuffer], {
    type: 'image/png',
  })
  const image = URL.createObjectURL(blob)
  setGeneratedImage(image)
  setLoadingImage(false)
}

// generateDiffSound
const handleSound = e => {
setSound(e.target.value)
}
async function generateTestDiffSound() {
  const newSound = generateDiffSound()
  // generateDiffSound resolves with the sound endpoint URL, not the audio itself
  const soundUrl = await newSound({s: sound})
  const res = await fetch(soundUrl)
  const blob = new Blob([await res.arrayBuffer()])
  const soundFromBlob = URL.createObjectURL(blob)
  setGeneratedSound(soundFromBlob)
  setLoadingSound(false)
}

// generate pixel art
async function generateTestPixelArt() {
setLoadingPixelArt(true)
const newPixelArt = generatePixelArt()
const pixelArtBuffer = await newPixelArt()

const blob = new Blob([pixelArtBuffer], {
type: 'image/png',
})
const image = URL.createObjectURL(blob)
setGeneratedPixelArt(image)
setLoadingPixelArt(false)
}

// generate BLIP result
const handleBlip = e => {
setBlipImageUrl(e.target.value)
}
async function generateBlip() {
  const newBlip = generateBlipResult()
  const result = await newBlip({s: blipImageUrl})
  // the response shape isn't documented here, so render non-strings as JSON
  setGeneratedBlip(typeof result === 'string' ? result : JSON.stringify(result))
  setLoadingBlip(false)
}

// TODO styling!!
return (
<div className={styles.container}>
<Head>
<title>Test - Generators</title>
<meta name="description" content="Lore engine" />
</Head>

<main className={styles.main}>
<p>Generate voice using tiktalknet</p>
<div>Endpoints:
<p>/tts?s={"{s}"}&voice={"{voice}"}</p>
<ul>
<li>s (string): text to convert</li>
<li>voice (string | optional): the id of the voice to use</li>
</ul>
</div>
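{/* Example request (hypothetical voice id): GET /tts?s=hello%20there&voice=test-voice */}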
<button
onClick={() => setLoadingVoice(true)}
>
Generate Test Voice
</button>
{loadingVoice &&
<div>
<input
type='text'
id='transcript'
name='transcript'
placeholder='Transcript'
onChange={handleTranscript}
value={transcript}
/>
<br />
<input
type='text'
id='voice'
name='voice'
placeholder='Voice ID'
onChange={handleVoice}
value={voice}
/>
<br />
<button onClick={generateTestVoice}>Generate</button>
</div>
}
{!loadingVoice && !generatedVoice && <p>No voice data</p>}
{!loadingVoice && generatedVoice &&
<div>
<br />
<audio controls src={generatedVoice}></audio>
<br />
</div>
}
<hr />
<p>Generate image using Stable Diffusion</p>
<div>Endpoints:
<p>/image?s={"{s}"}&model={"{model}"}</p>
<ul>
<li>s (string): text prompt to generate the image from</li>
<li>model (string | optional): the id of the model to use</li>
</ul>
</div>
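{/* Example request (hypothetical model id): GET /image?s=a%20snowy%20mountain&model=default */}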
<button
onClick={generateTestImage}
>
Generate Test Image
</button>
{loadingImage && <p>Loading...</p>}
{!loadingImage && !generatedImage && <p>No image data</p>}
{!loadingImage && generatedImage &&
<div>
<br />
<img src={generatedImage} alt='generated image' />
<br />
</div>
}
<hr />
<p>Generate sound with Diffsound (AWS)</p>
<div>Endpoints:
<p>/sound?s={"{s}"}</p>
<ul>
<li>s (string): text to convert</li>
</ul>
</div>
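{/* Example request: GET /sound?s=dog%20barking */}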
<button onClick={() => setLoadingSound(true)}>
Generate Test Sound
</button>
{loadingSound &&
<div>
<input
type='text'
id='sound'
name='sound'
placeholder='Sound'
onChange={handleSound}
value={sound}
/>
<br />
<button onClick={generateTestDiffSound}>Generate</button>
</div>
}
{!loadingSound && !generatedSound && <p>No sound data</p>}
{!loadingSound && generatedSound &&
<div>
<br />
<audio controls src={generatedSound}></audio>
<br />
</div>
}
<hr />
<p>Generate Pixel Art (AWS)</p>
<div>Endpoints:
<p>/generate : kick off a new image generation job and add it to the backlog. returns the id of the job</p>
<p>/generate_result : retrieve the finished image for a job by its query_id</p>
<p>/prompt_tags : returns the current tags added to prompts</p>
</div>
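{/* Example flow: GET /generate?steps=25&seed=30&s=snowy%20mountains returns the job id, then GET /generate_result?query_id={id} returns the image bytes */}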
<button
onClick={generateTestPixelArt}
>
Generate Pixel Art
</button>
{loadingPixelArt && <p>Loading, can take up to one minute...</p>}
{!loadingPixelArt && !generatedPixelArt && <p>No Pixel Art data</p>}
{!loadingPixelArt && generatedPixelArt &&
<div>
<br />
<img src={generatedPixelArt} alt='generated pixel art' />
<br />
</div>
}
<hr />
<p>Generate image captioning using BLIP</p>
<div>Endpoints:
<p>POST /upload</p>
<ul>
<li>FormData task (string | optional): the task to run (image_captioning, vqa, feature_extraction or text_matching)</li>
<li>FormData file (file): the image to get the text caption for</li>
</ul>
<p>POST /upload/url</p>
<p>Body:</p>
<p>
{"{"}
"task": {"<task>"},
"file": {"<string>"}
{"}"}
</p>
<ul>
<li>Body task (string | optional): the task to run (image_captioning, vqa, feature_extraction or text_matching)</li>
<li>Body file (string): the image url to get the text caption for</li>
</ul>
</div>
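{/* Example body (placeholder image URL): {"task": "image_captioning", "file": "https://example.com/cat.png"} */}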
<button
onClick={() => setLoadingBlip(true)}
>
Generate BLIP data
</button>
{loadingBlip &&
<div>
<input
type='text'
id='img_url'
name='img_url'
placeholder='Image URL'
onChange={handleBlip}
value={blipImageUrl}
/>
<br />
<button onClick={generateBlip}>Generate</button>
</div>
}
{!loadingBlip && !generatedBlip && <p>No BLIP data</p>}
{!loadingBlip && generatedBlip &&
<div>
<br />
<span>{generatedBlip}</span>
<br />
</div>
}
<hr />
</main>
</div>
)
}
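
As documented above, the BLIP POST /upload/url request could also be exercised directly with node-fetch; a sketch, assuming the service accepts a JSON body and responds with JSON (the image URL is a placeholder):

import fetch from 'node-fetch'
import {blipUrl} from './constants/endpoints-constants'

const res = await fetch(`${blipUrl}/upload/url`, {
  method: 'POST',
  headers: {'Content-Type': 'application/json'},
  body: JSON.stringify({
    task: 'image_captioning', // also: vqa, feature_extraction, text_matching
    file: 'https://example.com/cat.png', // placeholder image url
  }),
})
console.log(await res.json())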