created visualize page
ombhojane committed Feb 11, 2024
1 parent f2aff25 commit 0e8c7f8
Showing 6 changed files with 122 additions and 5 deletions.
Binary file modified __pycache__/app.cpython-311.pyc
78 changes: 78 additions & 0 deletions app.py
@@ -1,6 +1,12 @@
from flask import Flask, render_template, request, session
import google.generativeai as genai
import os
# import torch
# from PIL import Image
# import numpy as np
# from flask import jsonify, send_from_directory
# from werkzeug.utils import secure_filename
# from transformers import pipeline
# from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline, UniPCMultistepScheduler
# from diffusers.utils import load_image


app = Flask(__name__)
app.secret_key = os.urandom(24) # Necessary for session management
@@ -45,6 +51,78 @@ def format_response(response):
formatted_response = "<ul>" + "\n".join(formatted_lines) + "</ul>" if formatted_lines else response
return formatted_response.replace("<ul></ul>", "") # Remove empty list tags

# Adding visualizations

# device = "cuda" if torch.cuda.is_available() else "cpu"

# UPLOAD_FOLDER = 'uploads'
# GENERATED_FOLDER = 'generated'
# app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# app.config['GENERATED_FOLDER'] = GENERATED_FOLDER

# os.makedirs(UPLOAD_FOLDER, exist_ok=True)
# os.makedirs(GENERATED_FOLDER, exist_ok=True)

# def generate_image_with_ml_model(image_path, prompt):
# # Load the input image (load_image returns a PIL.Image, so no .to(device) is needed)
# input_image = load_image(image_path)

# # Initialize the depth estimator
# depth_estimator = pipeline("depth-estimation", device=device)

# # Process to obtain depth map
# depth_map = get_depth_map(input_image, depth_estimator)  # assumes a get_depth_map helper; a sketch follows below

# # Initialize the ControlNet model and pipeline
# controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16, use_safetensors=True).to(device)  # depth ControlNet to match the depth-map conditioning
# pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
# "runwayml/stable-diffusion-v1-5",
# controlnet=controlnet,
# torch_dtype=torch.float16,
# use_safetensors=True
# ).to(device)
# pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# pipe.enable_model_cpu_offload()

# # Generate the image
# output = pipe(prompt=prompt, image=input_image, control_image=depth_map).images[0]

# # The pipeline returns PIL images by default, so no tensor-to-PIL conversion is needed
# output_image = output

# return output_image
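
# # A minimal sketch of the get_depth_map helper assumed above, following the
# # diffusers ControlNet depth-estimation example: run the depth pipeline, stack
# # the single depth channel to three, and return a (3, H, W) float tensor in [0, 1].
# def get_depth_map(image, depth_estimator):
#     depth = np.array(depth_estimator(image)["depth"])[:, :, None]
#     depth = np.concatenate([depth, depth, depth], axis=2)
#     return torch.from_numpy(depth).float().permute(2, 0, 1) / 255.0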

# @app.route('/generate-image', methods=['POST'])
# def generate_image_endpoint():
# if 'image' not in request.files:
# return jsonify({'error': 'No image part'}), 400
# file = request.files['image']
# prompt = request.form.get('prompt', '') # Get the prompt from the form data
# if file.filename == '':
# return jsonify({'error': 'No selected file'}), 400
# if file and prompt:
# filename = secure_filename(file.filename)
# input_filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# file.save(input_filepath)

# # Generate the image
# output_image = generate_image_with_ml_model(input_filepath, prompt)
# output_filename = f"generated_{filename}"
# output_filepath = os.path.join(app.config['GENERATED_FOLDER'], output_filename)
# output_image.save(output_filepath)

# return jsonify({'generatedImageUrl': f'/generated/{output_filename}'})
# else:
# return jsonify({'error': 'Invalid request'}), 400

# @app.route('/generated/<filename>')
# def generated_image(filename):
# return send_from_directory(app.config['GENERATED_FOLDER'], filename)
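
# # Example client call for when the /generate-image endpoint above is re-enabled:
# # post a multipart form with an 'image' file and a 'prompt' field, then read the
# # generated image URL from the JSON response. The host, file name, and prompt are
# # placeholders, and the requests library is assumed to be available.
# import requests
# with open("farm.jpg", "rb") as f:
#     resp = requests.post("http://localhost:5000/generate-image",
#                          files={"image": f},
#                          data={"prompt": "a small agrotourism farm stay"})
# print(resp.json()["generatedImageUrl"])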


@app.route('/visualize')
def visualize():
return render_template('visualize.html')

@app.route('/')
def index():
8 changes: 4 additions & 4 deletions static/predict.js
@@ -8,16 +8,16 @@ tailwind.init();
var farmImage = document.getElementById('farmImage');

if (landSize >= 1 && landSize < 10) {
farmImage.src = 'https://raw.githubusercontent.com/ombhojane/letschalokisaanai/main/assets/small.png?token=GHSAT0AAAAAACLEWSAUXKUNPOKOU7YRSYYQZOJFMJQ';
farmImage.src = 'https://github.com/ombhojane/chalokisaanai/blob/main/assets/small.png?raw=true';
farmImage.classList.remove('hidden');
} else if (landSize >= 10 && landSize < 20) {
farmImage.src = 'https://raw.githubusercontent.com/ombhojane/letschalokisaanai/main/assets/mid.png?token=GHSAT0AAAAAACLEWSAUL2LMUOLEMOF2JFM2ZOJFM2A';
farmImage.src = 'https://github.com/ombhojane/chalokisaanai/blob/main/assets/mid.png?raw=true';
farmImage.classList.remove('hidden');
} else if (landSize >= 20 && landSize < 25) {
farmImage.src = 'https://raw.githubusercontent.com/ombhojane/letschalokisaanai/main/assets/extramid.png?token=GHSAT0AAAAAACLEWSAU5U5RT3JIOVNKNZDSZOJFNIA';
farmImage.src = 'https://github.com/ombhojane/chalokisaanai/blob/main/assets/extramid.png?raw=true';
farmImage.classList.remove('hidden');
} else if (landSize >= 25) {
farmImage.src = 'https://raw.githubusercontent.com/ombhojane/letschalokisaanai/main/assets/large.png?token=GHSAT0AAAAAACLEWSAVVWAAAKGCSNDKOKFUZOJFNUA';
farmImage.src = 'https://github.com/ombhojane/chalokisaanai/blob/main/assets/large.png?raw=true';
farmImage.classList.remove('hidden');
} else {
farmImage.classList.add('hidden');
Expand Down
1 change: 1 addition & 0 deletions templates/index.html
@@ -32,6 +32,7 @@
</div>
<div class="nav-links hidden md:flex">
<a href="/predict" class="text-gray-600 mx-2">Start Planning</a>
<a href="/visualize" class="text-gray-600 mx-2">Visualize</a>
<a href="/generate" class="text-gray-600 mx-2">Generate</a>
<a href="#" class="text-blue-600 mx-2">How it works?</a>
</div>
2 changes: 1 addition & 1 deletion templates/predict.html
@@ -33,7 +33,7 @@
<div class="bg-white p-8 shadow-lg rounded-lg max-w-3xl mx-auto">
<h1 class="text-4xl font-bold mb-6 text-green-700">Plan your next agrotourism service</h1>
<div class="relative h-0" style="padding-bottom: 65%;">
<img src="https://raw.githubusercontent.com/ombhojane/letschalokisaanai/main/assets/planning.png?token=GHSAT0AAAAAACLEWSAUVSOJGOSWEMHTXDT2ZOJFOBQ" alt="Agrotourism Image" class="absolute inset-0 w-full object-cover rounded-lg" />
<img src="https://github.com/ombhojane/chalokisaanai/blob/main/assets/extramid.png" alt="Agrotourism Image" class="absolute inset-0 w-full object-cover rounded-lg" />
</div>
<form>
<div class="form-section">
38 changes: 38 additions & 0 deletions templates/visualize.html
@@ -0,0 +1,38 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Image Generation</title>
</head>
<body>
<h2>Image Generation Form</h2>
<form id="generateForm" enctype="multipart/form-data">
<input type="file" id="image" name="image" required>
<button type="submit">Generate Image</button>
</form>
<h3>Generated Image:</h3>
<img id="outputImage" src="" alt="Generated Image" style="max-width: 500px;">

<script>
document.getElementById('generateForm').addEventListener('submit', function(e) {
e.preventDefault(); // Prevent the default form submission

const formData = new FormData();
const imageInput = document.getElementById('image');
const promptInput = document.getElementById('prompt');
if (imageInput.files.length > 0) {
formData.append('image', imageInput.files[0]);
}
formData.append('prompt', promptInput.value); // the /generate-image endpoint expects a 'prompt' form field

fetch('/generate-image', {
method: 'POST',
body: formData,
})
.then(response => response.json())
.then(data => {
document.getElementById('outputImage').src = data.generatedImageUrl;
})
.catch(error => console.error('Error:', error));
});
</script>
</body>
</html>
